]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/net/tulip/winbond-840.c
[TG3]: Use pci_device_to_OF_node() on sparc.
[mirror_ubuntu-artful-kernel.git] / drivers / net / tulip / winbond-840.c
CommitLineData
1da177e4
LT
1/* winbond-840.c: A Linux PCI network adapter device driver. */
2/*
3 Written 1998-2001 by Donald Becker.
4
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
11
12 The author may be reached as becker@scyld.com, or C/O
13 Scyld Computing Corporation
14 410 Severn Ave., Suite 210
15 Annapolis MD 21403
16
17 Support and updates available at
18 http://www.scyld.com/network/drivers.html
19
20 Do not remove the copyright information.
21 Do not change the version information unless an improvement has been made.
22 Merely removing my name, as Compex has done in the past, does not count
23 as an improvement.
24
25 Changelog:
26 * ported to 2.4
27 ???
28 * spin lock update, memory barriers, new style dma mappings
29 limit each tx buffer to < 1024 bytes
 30 remove DescIntr from Rx descriptors (that's a Tx flag)
31 remove next pointer from Tx descriptors
32 synchronize tx_q_bytes
33 software reset in tx_timeout
34 Copyright (C) 2000 Manfred Spraul
35 * further cleanups
36 power management.
37 support for big endian descriptors
38 Copyright (C) 2001 Manfred Spraul
39 * ethtool support (jgarzik)
40 * Replace some MII-related magic numbers with constants (jgarzik)
f3b197ac 41
1da177e4
LT
42 TODO:
43 * enable pci_power_off
44 * Wake-On-LAN
45*/
f3b197ac 46
1da177e4 47#define DRV_NAME "winbond-840"
d5b20697
AG
48#define DRV_VERSION "1.01-e"
49#define DRV_RELDATE "Sep-11-2006"
1da177e4
LT
50
51
52/* Automatically extracted configuration info:
53probe-func: winbond840_probe
54config-in: tristate 'Winbond W89c840 Ethernet support' CONFIG_WINBOND_840
55
56c-help-name: Winbond W89c840 PCI Ethernet support
57c-help-symbol: CONFIG_WINBOND_840
58c-help: This driver is for the Winbond W89c840 chip. It also works with
59c-help: the TX9882 chip on the Compex RL100-ATX board.
f3b197ac 60c-help: More specific information and updates are available from
1da177e4
LT
61c-help: http://www.scyld.com/network/drivers.html
62*/
63
/* The user-configurable values.
   These may be modified when a driver module is loaded.*/

/* Message verbosity: 1 normal messages, 0 quiet .. 7 verbose. */
static int debug = 1;
/* Cap on events handled per invocation of the interrupt handler. */
static int max_interrupt_work = 20;
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
   The '840 uses a 64 element hash table based on the Ethernet CRC. */
static int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature.
   Default 0: never copy, always hand the mapped buffer up. */
static int rx_copybreak;

/* Used to pass the media type, etc.
   Both 'options[]' and 'full_duplex[]' should exist for driver
   interoperability.
   The media type is usually passed in 'options[]'.
   -1 means "not set by the user" for that unit.
*/
#define MAX_UNITS 8 /* More are supported, limit only on options */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
85
86/* Operational parameters that are set at compile time. */
87
88/* Keep the ring sizes a power of two for compile efficiency.
89 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
90 Making the Tx ring too large decreases the effectiveness of channel
91 bonding and packet priority.
92 There are no ill effects from too-large receive rings. */
1da177e4
LT
/* Tx ring occupancy limits: stop queueing at TX_QUEUE_LEN used entries,
   restart once cleanup drops usage to TX_QUEUE_LEN_RESTART. */
#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
#define TX_QUEUE_LEN_RESTART 5

/* Hardware limit: each Tx buffer may hold < 1KB (see changelog / theory). */
#define TX_BUFLIMIT (1024-128)

/* The presumed FIFO size for working around the Tx-FIFO-overflow bug.
   To avoid overflowing we don't queue again until we have room for a
   full-size packet.
 */
#define TX_FIFO_SIZE (2048)
#define TX_BUG_FIFO_LIMIT (TX_FIFO_SIZE-1514-16)


/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (2*HZ)

#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
111
112/* Include files, designed to support most kernel versions 2.0.0 and later. */
113#include <linux/module.h>
114#include <linux/kernel.h>
115#include <linux/string.h>
116#include <linux/timer.h>
117#include <linux/errno.h>
118#include <linux/ioport.h>
119#include <linux/slab.h>
120#include <linux/interrupt.h>
121#include <linux/pci.h>
10a87fcf 122#include <linux/dma-mapping.h>
1da177e4
LT
123#include <linux/netdevice.h>
124#include <linux/etherdevice.h>
125#include <linux/skbuff.h>
126#include <linux/init.h>
127#include <linux/delay.h>
128#include <linux/ethtool.h>
129#include <linux/mii.h>
130#include <linux/rtnetlink.h>
131#include <linux/crc32.h>
132#include <linux/bitops.h>
133#include <asm/uaccess.h>
134#include <asm/processor.h> /* Processor type for cache alignment. */
135#include <asm/io.h>
136#include <asm/irq.h>
137
42eab567
GG
138#include "tulip.h"
139
/* These identify the driver base version and may not be removed. */
static char version[] =
KERN_INFO DRV_NAME ".c:v" DRV_VERSION " (2.4 port) " DRV_RELDATE " Donald Becker <becker@scyld.com>\n"
KERN_INFO " http://www.scyld.com/network/drivers.html\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Winbond W89c840 Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* Module parameters; all world-readable permissions are 0 (not exported
   via sysfs), values are read once at load time. */
module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(multicast_filter_limit, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(max_interrupt_work, "winbond-840 maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "winbond-840 debug level (0-6)");
MODULE_PARM_DESC(rx_copybreak, "winbond-840 copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(multicast_filter_limit, "winbond-840 maximum number of filtered multicast addresses");
MODULE_PARM_DESC(options, "winbond-840: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "winbond-840 full duplex setting(s) (1)");
162
163/*
164 Theory of Operation
165
166I. Board Compatibility
167
168This driver is for the Winbond w89c840 chip.
169
170II. Board-specific settings
171
172None.
173
174III. Driver operation
175
176This chip is very similar to the Digital 21*4* "Tulip" family. The first
177twelve registers and the descriptor format are nearly identical. Read a
178Tulip manual for operational details.
179
180A significant difference is that the multicast filter and station address are
181stored in registers rather than loaded through a pseudo-transmit packet.
182
183Unlike the Tulip, transmit buffers are limited to 1KB. To transmit a
184full-sized packet we must use both data buffers in a descriptor. Thus the
185driver uses ring mode where descriptors are implicitly sequential in memory,
186rather than using the second descriptor address as a chain pointer to
187subsequent descriptors.
188
189IV. Notes
190
191If you are going to almost clone a Tulip, why not go all the way and avoid
192the need for a new driver?
193
194IVb. References
195
196http://www.scyld.com/expert/100mbps.html
197http://www.scyld.com/expert/NWay.html
198http://www.winbond.com.tw/
199
200IVc. Errata
201
202A horrible bug exists in the transmit FIFO. Apparently the chip doesn't
203correctly detect a full FIFO, and queuing more than 2048 bytes may result in
204silent data corruption.
205
206Test with 'ping -s 10000' on a fast computer.
207
208*/
209
f3b197ac 210
1da177e4
LT
211
/*
	PCI probe table.
*/
/* Per-chip capability bits stored in pci_id_tbl[].drv_flags. */
enum chip_capability_flags {
	CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,
};

/* driver_data (last field) indexes pci_id_tbl[] below. */
static const struct pci_device_id w840_pci_tbl[] = {
	{ 0x1050, 0x0840, PCI_ANY_ID, 0x8153, 0, 0, 0 },
	{ 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
	{ 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
	{ }
};
MODULE_DEVICE_TABLE(pci, w840_pci_tbl);
226
c3d8e682
JG
enum {
	netdev_res_size = 128, /* size of PCI BAR resource */
};

/* Board name and capability flags, indexed by pci_device_id.driver_data. */
struct pci_id_info {
	const char *name;
	int drv_flags; /* Driver use, intended as capability flags. */
};

static const struct pci_id_info pci_id_tbl[] __devinitdata = {
	{ /* Sometime a Level-One switch card. */
	 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
	{ "Winbond W89c840", CanHaveMII | HasBrokenTx},
	{ "Compex RL100-ATX", CanHaveMII | HasBrokenTx},
	{ } /* terminate list. */
};
243
244/* This driver was written to use PCI memory space, however some x86 systems
42eab567
GG
245 work only with I/O space accesses. See CONFIG_TULIP_MMIO in .config
246*/
1da177e4
LT
247
/* Offsets to the Command and Status Registers, "CSRs".
   While similar to the Tulip, these registers are longword aligned.
   Note: It's not useful to define symbolic names for every register bit in
   the device. The name can only partially document the semantics and make
   the driver longer and more difficult to read.
*/
enum w840_offsets {
	PCIBusCfg=0x00, TxStartDemand=0x04, RxStartDemand=0x08,
	RxRingPtr=0x0C, TxRingPtr=0x10,
	IntrStatus=0x14, NetworkConfig=0x18, IntrEnable=0x1C,
	RxMissed=0x20, EECtrl=0x24, MIICtrl=0x24, BootRom=0x28, GPTimer=0x2C,
	CurRxDescAddr=0x30, CurRxBufAddr=0x34, /* Debug use */
	MulticastFilter0=0x38, MulticastFilter1=0x3C, StationAddr=0x40,
	CurTxDescAddr=0x4C, CurTxBufAddr=0x50,
};
263
1da177e4
LT
/* Bits in the NetworkConfig register. */
enum rx_mode_bits {
	AcceptErr=0x80,
	RxAcceptBroadcast=0x20, AcceptMulticast=0x10,
	RxAcceptAllPhys=0x08, AcceptMyPhys=0x02,
};

/* Bit-bang control lines in the MIICtrl register (shared with EECtrl). */
enum mii_reg_bits {
	MDIO_ShiftClk=0x10000, MDIO_DataIn=0x80000, MDIO_DataOut=0x20000,
	MDIO_EnbOutput=0x40000, MDIO_EnbIn = 0x00000,
};

/* The Tulip Rx and Tx buffer descriptors.
   Layout is fixed by the hardware; status/length are s32 so the sign bit
   can be tested for DescOwned-style flags, buffer pointers are bus
   addresses. */
struct w840_rx_desc {
	s32 status;
	s32 length;
	u32 buffer1;
	u32 buffer2;
};

struct w840_tx_desc {
	s32 status;
	s32 length;
	u32 buffer1, buffer2;
};
289
1da177e4
LT
#define MII_CNT 1 /* winbond only supports one MII */
/* Per-device driver state, hung off the net_device via netdev_priv(). */
struct netdev_private {
	struct w840_rx_desc *rx_ring;		/* Coherent ring memory; Tx ring follows Rx. */
	dma_addr_t rx_addr[RX_RING_SIZE];	/* Bus address of each Rx buffer. */
	struct w840_tx_desc *tx_ring;
	dma_addr_t tx_addr[TX_RING_SIZE];	/* Bus address of each queued Tx skb. */
	dma_addr_t ring_dma_addr;		/* Bus address of the ring block itself. */
	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	/* The saved address of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	struct net_device_stats stats;
	struct timer_list timer; /* Media monitoring timer. */
	/* Frequently used values: keep some adjacent for cache effect. */
	spinlock_t lock;
	int chip_id, drv_flags;
	struct pci_dev *pci_dev;
	int csr6;				/* Shadow of the NetworkConfig register. */
	struct w840_rx_desc *rx_head_desc;
	unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
	unsigned int rx_buf_sz; /* Based on MTU+slack. */
	unsigned int cur_tx, dirty_tx;
	unsigned int tx_q_bytes;		/* Bytes pending, for the Tx-FIFO-bug workaround. */
	unsigned int tx_full; /* The Tx queue is full. */
	/* MII transceiver section. */
	int mii_cnt; /* MII device addresses. */
	unsigned char phys[MII_CNT]; /* MII device addresses, but only the first is used */
	u32 mii;				/* PHY ID (PHYSID1 << 16 | PHYSID2). */
	struct mii_if_info mii_if;
	void __iomem *base_addr;		/* Mapped CSR base (pci_iomap). */
};
321
322static int eeprom_read(void __iomem *ioaddr, int location);
323static int mdio_read(struct net_device *dev, int phy_id, int location);
324static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
325static int netdev_open(struct net_device *dev);
326static int update_link(struct net_device *dev);
327static void netdev_timer(unsigned long data);
328static void init_rxtx_rings(struct net_device *dev);
329static void free_rxtx_rings(struct netdev_private *np);
330static void init_registers(struct net_device *dev);
331static void tx_timeout(struct net_device *dev);
332static int alloc_ringdesc(struct net_device *dev);
333static void free_ringdesc(struct netdev_private *np);
334static int start_tx(struct sk_buff *skb, struct net_device *dev);
7d12e780 335static irqreturn_t intr_handler(int irq, void *dev_instance);
1da177e4
LT
336static void netdev_error(struct net_device *dev, int intr_status);
337static int netdev_rx(struct net_device *dev);
338static u32 __set_rx_mode(struct net_device *dev);
339static void set_rx_mode(struct net_device *dev);
340static struct net_device_stats *get_stats(struct net_device *dev);
341static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
7282d491 342static const struct ethtool_ops netdev_ethtool_ops;
1da177e4
LT
343static int netdev_close(struct net_device *dev);
344
f3b197ac 345
1da177e4
LT
346
/* Probe one W89c840 device: enable it, map its CSRs, read the station
   address from the EEPROM, apply module options, register the netdev and
   scan for an MII PHY. Returns 0 on success or a negative errno. */
static int __devinit w840_probe1 (struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	static int find_cnt;			/* Board index for options[]/full_duplex[]. */
	int chip_idx = ent->driver_data;
	int irq;
	int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
	void __iomem *ioaddr;

	i = pci_enable_device(pdev);
	if (i) return i;

	pci_set_master(pdev);

	irq = pdev->irq;

	/* Descriptors and buffers use 32-bit bus addresses only. */
	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
		printk(KERN_WARNING "Winbond-840: Device %s disabled due to DMA limitations.\n",
		       pci_name(pdev));
		return -EIO;
	}
	dev = alloc_etherdev(sizeof(*np));
	if (!dev)
		return -ENOMEM;
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	if (pci_request_regions(pdev, DRV_NAME))
		goto err_out_netdev;

	ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size);
	if (!ioaddr)
		goto err_out_free_res;

	/* Station address: first three 16-bit EEPROM words. */
	for (i = 0; i < 3; i++)
		((u16 *)dev->dev_addr)[i] = le16_to_cpu(eeprom_read(ioaddr, i));

	/* Reset the chip to erase previous misconfiguration.
	   No hold time required! */
	iowrite32(0x00000001, ioaddr + PCIBusCfg);

	dev->base_addr = (unsigned long)ioaddr;
	dev->irq = irq;

	np = netdev_priv(dev);
	np->pci_dev = pdev;
	np->chip_id = chip_idx;
	np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
	spin_lock_init(&np->lock);
	np->mii_if.dev = dev;
	np->mii_if.mdio_read = mdio_read;
	np->mii_if.mdio_write = mdio_write;
	np->base_addr = ioaddr;

	pci_set_drvdata(pdev, dev);

	/* A boot-time mem_start (e.g. from ether= / netdev=) overrides the
	   module option for this unit. */
	if (dev->mem_start)
		option = dev->mem_start;

	/* The lower four bits are the media type. */
	if (option > 0) {
		if (option & 0x200)
			np->mii_if.full_duplex = 1;
		if (option & 15)
			printk(KERN_INFO "%s: ignoring user supplied media type %d",
				dev->name, option & 15);
	}
	if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
		np->mii_if.full_duplex = 1;

	if (np->mii_if.full_duplex)
		np->mii_if.force_media = 1;

	/* The chip-specific entries in the device structure. */
	dev->open = &netdev_open;
	dev->hard_start_xmit = &start_tx;
	dev->stop = &netdev_close;
	dev->get_stats = &get_stats;
	dev->set_multicast_list = &set_rx_mode;
	dev->do_ioctl = &netdev_ioctl;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->tx_timeout = &tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;

	i = register_netdev(dev);
	if (i)
		goto err_out_cleardev;

	printk(KERN_INFO "%s: %s at %p, ",
		   dev->name, pci_id_tbl[chip_idx].name, ioaddr);
	for (i = 0; i < 5; i++)
			printk("%2.2x:", dev->dev_addr[i]);
	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);

	if (np->drv_flags & CanHaveMII) {
		int phy, phy_idx = 0;
		/* Scan PHY addresses 1..31; keep at most MII_CNT responders. */
		for (phy = 1; phy < 32 && phy_idx < MII_CNT; phy++) {
			int mii_status = mdio_read(dev, phy, MII_BMSR);
			/* 0x0000/0xffff mean no transceiver at this address. */
			if (mii_status != 0xffff && mii_status != 0x0000) {
				np->phys[phy_idx++] = phy;
				np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
				np->mii = (mdio_read(dev, phy, MII_PHYSID1) << 16)+
						mdio_read(dev, phy, MII_PHYSID2);
				printk(KERN_INFO "%s: MII PHY %8.8xh found at address %d, status "
					   "0x%4.4x advertising %4.4x.\n",
					   dev->name, np->mii, phy, mii_status, np->mii_if.advertising);
			}
		}
		np->mii_cnt = phy_idx;
		np->mii_if.phy_id = np->phys[0];
		if (phy_idx == 0) {
				printk(KERN_WARNING "%s: MII PHY not found -- this device may "
					   "not operate correctly.\n", dev->name);
		}
	}

	find_cnt++;
	return 0;

err_out_cleardev:
	pci_set_drvdata(pdev, NULL);
	pci_iounmap(pdev, ioaddr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_netdev:
	free_netdev (dev);
	return -ENODEV;
}
477
f3b197ac 478
1da177e4
LT
479/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. These are
480 often serial bit streams generated by the host processor.
481 The example below is for the common 93c46 EEPROM, 64 16 bit words. */
482
483/* Delay between EEPROM clock transitions.
484 No extra delay is needed with 33Mhz PCI, but future 66Mhz access may need
485 a delay. Note that pre-2.0.34 kernels had a cache-alignment bug that
486 made udelay() unreliable.
487 The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
 488 deprecated.
489*/
/* A dummy read of the control register provides the required inter-edge
   delay on the EEPROM's serial bus. */
#define eeprom_delay(ee_addr) ioread32(ee_addr)

/* Control-line bit values written to EECtrl. */
enum EEPROM_Ctrl_Bits {
	EE_ShiftClk=0x02, EE_Write0=0x801, EE_Write1=0x805,
	EE_ChipSelect=0x801, EE_DataIn=0x08,
};

/* The EEPROM commands include the always-set leading bit. */
enum EEPROM_Cmds {
	EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
};

/* Bit-bang a 16-bit word out of the serial EEPROM (93c46-style) at the
   given word address. Returns the word read. */
static int eeprom_read(void __iomem *addr, int location)
{
	int i;
	int retval = 0;
	void __iomem *ee_addr = addr + EECtrl;
	int read_cmd = location | EE_ReadCmd;
	iowrite32(EE_ChipSelect, ee_addr);

	/* Shift the read command bits out. */
	for (i = 10; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
		iowrite32(dataval, ee_addr);
		eeprom_delay(ee_addr);
		iowrite32(dataval | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
	}
	iowrite32(EE_ChipSelect, ee_addr);
	eeprom_delay(ee_addr);

	/* Clock in the 16 data bits, MSB first. */
	for (i = 16; i > 0; i--) {
		iowrite32(EE_ChipSelect | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
		retval = (retval << 1) | ((ioread32(ee_addr) & EE_DataIn) ? 1 : 0);
		iowrite32(EE_ChipSelect, ee_addr);
		eeprom_delay(ee_addr);
	}

	/* Terminate the EEPROM access. */
	iowrite32(0, ee_addr);
	return retval;
}
533
/* MII transceiver control section.
   Read and write the MII registers using software-generated serial
   MDIO protocol. See the MII specifications or DP83840A data sheet
   for details.

   The maximum data clock rate is 2.5 Mhz. The minimum timing is usually
   met by back-to-back 33Mhz PCI cycles. */
#define mdio_delay(mdio_addr) ioread32(mdio_addr)

/* Set iff a MII transceiver on any interface requires mdio preamble.
   This is only set with older transceivers, so the extra
   code size of a per-interface flag is not worthwhile. */
static char mii_preamble_required = 1;

/* Line states driven onto MIICtrl for a 0 and a 1 data bit. */
#define MDIO_WRITE0 (MDIO_EnbOutput)
#define MDIO_WRITE1 (MDIO_DataOut | MDIO_EnbOutput)
550
551/* Generate the preamble required for initial synchronization and
552 a few older transceivers. */
553static void mdio_sync(void __iomem *mdio_addr)
554{
555 int bits = 32;
556
557 /* Establish sync by sending at least 32 logic ones. */
558 while (--bits >= 0) {
559 iowrite32(MDIO_WRITE1, mdio_addr);
560 mdio_delay(mdio_addr);
561 iowrite32(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
562 mdio_delay(mdio_addr);
563 }
564}
565
/* Bit-bang an MII register read from the PHY at phy_id. Returns the
   16-bit register value. NOTE(review): the 0xf6<<10 frame encodes the
   MDIO start/read opcode plus address fields — verify against the chip's
   MIICtrl description if modifying. */
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base_addr + MIICtrl;
	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
	int i, retval = 0;

	if (mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the read command bits out. */
	for (i = 15; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite32(dataval, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	/* Read the two transition, 16 data, and wire-idle bits. */
	for (i = 20; i > 0; i--) {
		iowrite32(MDIO_EnbIn, mdio_addr);
		mdio_delay(mdio_addr);
		retval = (retval << 1) | ((ioread32(mdio_addr) & MDIO_DataIn) ? 1 : 0);
		iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	/* Drop the trailing idle bit and mask to 16 data bits. */
	return (retval>>1) & 0xffff;
}
595
596static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
597{
598 struct netdev_private *np = netdev_priv(dev);
599 void __iomem *mdio_addr = np->base_addr + MIICtrl;
600 int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
601 int i;
602
603 if (location == 4 && phy_id == np->phys[0])
604 np->mii_if.advertising = value;
605
606 if (mii_preamble_required)
607 mdio_sync(mdio_addr);
608
609 /* Shift the command bits out. */
610 for (i = 31; i >= 0; i--) {
611 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
612
613 iowrite32(dataval, mdio_addr);
614 mdio_delay(mdio_addr);
615 iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
616 mdio_delay(mdio_addr);
617 }
618 /* Clear out extra bits. */
619 for (i = 2; i > 0; i--) {
620 iowrite32(MDIO_EnbIn, mdio_addr);
621 mdio_delay(mdio_addr);
622 iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
623 mdio_delay(mdio_addr);
624 }
625 return;
626}
627
f3b197ac 628
1da177e4
LT
/* Bring the interface up: reset the chip, grab the IRQ, allocate the
   descriptor rings, program the registers and start the media-monitor
   timer. Returns 0 or a negative errno. */
static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int i;

	iowrite32(0x00000001, ioaddr + PCIBusCfg); /* Reset */

	/* Detach while setting up so a shared-IRQ handler that fires early
	   sees the device as absent. */
	netif_device_detach(dev);
	i = request_irq(dev->irq, &intr_handler, IRQF_SHARED, dev->name, dev);
	if (i)
		goto out_err;

	if (debug > 1)
		printk(KERN_DEBUG "%s: w89c840_open() irq %d.\n",
			   dev->name, dev->irq);

	if((i=alloc_ringdesc(dev)))
		goto out_err;

	spin_lock_irq(&np->lock);
	netif_device_attach(dev);
	init_registers(dev);
	spin_unlock_irq(&np->lock);

	netif_start_queue(dev);
	if (debug > 2)
		printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name);

	/* Set the timer to check for link beat. */
	init_timer(&np->timer);
	np->timer.expires = jiffies + 1*HZ;
	np->timer.data = (unsigned long)dev;
	np->timer.function = &netdev_timer; /* timer handler */
	add_timer(&np->timer);
	return 0;
out_err:
	/* Re-attach so a later open can retry cleanly. */
	netif_device_attach(dev);
	return i;
}
669
670#define MII_DAVICOM_DM9101 0x0181b800
671
672static int update_link(struct net_device *dev)
673{
674 struct netdev_private *np = netdev_priv(dev);
675 int duplex, fasteth, result, mii_reg;
676
677 /* BSMR */
678 mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
679
680 if (mii_reg == 0xffff)
681 return np->csr6;
682 /* reread: the link status bit is sticky */
683 mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
684 if (!(mii_reg & 0x4)) {
685 if (netif_carrier_ok(dev)) {
686 if (debug)
687 printk(KERN_INFO "%s: MII #%d reports no link. Disabling watchdog.\n",
688 dev->name, np->phys[0]);
689 netif_carrier_off(dev);
690 }
691 return np->csr6;
692 }
693 if (!netif_carrier_ok(dev)) {
694 if (debug)
695 printk(KERN_INFO "%s: MII #%d link is back. Enabling watchdog.\n",
696 dev->name, np->phys[0]);
697 netif_carrier_on(dev);
698 }
f3b197ac 699
1da177e4
LT
700 if ((np->mii & ~0xf) == MII_DAVICOM_DM9101) {
701 /* If the link partner doesn't support autonegotiation
702 * the MII detects it's abilities with the "parallel detection".
703 * Some MIIs update the LPA register to the result of the parallel
704 * detection, some don't.
705 * The Davicom PHY [at least 0181b800] doesn't.
706 * Instead bit 9 and 13 of the BMCR are updated to the result
707 * of the negotiation..
708 */
709 mii_reg = mdio_read(dev, np->phys[0], MII_BMCR);
710 duplex = mii_reg & BMCR_FULLDPLX;
711 fasteth = mii_reg & BMCR_SPEED100;
712 } else {
713 int negotiated;
714 mii_reg = mdio_read(dev, np->phys[0], MII_LPA);
715 negotiated = mii_reg & np->mii_if.advertising;
716
717 duplex = (negotiated & LPA_100FULL) || ((negotiated & 0x02C0) == LPA_10FULL);
718 fasteth = negotiated & 0x380;
719 }
720 duplex |= np->mii_if.force_media;
721 /* remove fastether and fullduplex */
722 result = np->csr6 & ~0x20000200;
723 if (duplex)
724 result |= 0x200;
725 if (fasteth)
726 result |= 0x20000000;
727 if (result != np->csr6 && debug)
728 printk(KERN_INFO "%s: Setting %dMBit-%s-duplex based on MII#%d\n",
f3b197ac 729 dev->name, fasteth ? 100 : 10,
1da177e4
LT
730 duplex ? "full" : "half", np->phys[0]);
731 return result;
732}
733
#define RXTX_TIMEOUT 2000
/* Reprogram NetworkConfig (CSR6) safely: stop the Tx and Rx engines,
   busy-wait (up to RXTX_TIMEOUT us) until the chip reports them idle,
   then write the new value and restart. Caller holds np->lock. */
static inline void update_csr6(struct net_device *dev, int new)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int limit = RXTX_TIMEOUT;

	if (!netif_device_present(dev))
		new = 0;
	if (new==np->csr6)
		return;
	/* stop both Tx and Rx processes */
	iowrite32(np->csr6 & ~0x2002, ioaddr + NetworkConfig);
	/* wait until they have really stopped */
	for (;;) {
		int csr5 = ioread32(ioaddr + IntrStatus);
		int t;

		/* NOTE(review): bits 22:20 / 19:17 of IntrStatus are the
		   engine state fields; 0 and 1 read as stopped/suspended —
		   confirm against the W89c840 datasheet before changing. */
		t = (csr5 >> 17) & 0x07;
		if (t==0||t==1) {
			/* rx stopped */
			t = (csr5 >> 20) & 0x07;
			if (t==0||t==1)
				break;
		}

		limit--;
		if(!limit) {
			printk(KERN_INFO "%s: couldn't stop rxtx, IntrStatus %xh.\n",
				dev->name, csr5);
			break;
		}
		udelay(1);
	}
	np->csr6 = new;
	/* and restart them with the new configuration */
	iowrite32(np->csr6, ioaddr + NetworkConfig);
	if (new & 0x200)
		np->mii_if.full_duplex = 1;
}
774
775static void netdev_timer(unsigned long data)
776{
777 struct net_device *dev = (struct net_device *)data;
778 struct netdev_private *np = netdev_priv(dev);
779 void __iomem *ioaddr = np->base_addr;
780
781 if (debug > 2)
782 printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x "
783 "config %8.8x.\n",
784 dev->name, ioread32(ioaddr + IntrStatus),
785 ioread32(ioaddr + NetworkConfig));
786 spin_lock_irq(&np->lock);
787 update_csr6(dev, update_link(dev));
788 spin_unlock_irq(&np->lock);
789 np->timer.expires = jiffies + 10*HZ;
790 add_timer(&np->timer);
791}
792
/* Populate the already-allocated descriptor block: set up all Rx
   descriptors with freshly mapped skbs, zero the Tx ring, and load the
   ring base addresses into the chip. */
static void init_rxtx_rings(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->rx_head_desc = &np->rx_ring[0];
	/* The Tx ring lives immediately after the Rx ring in the same
	   coherent allocation (see alloc_ringdesc). */
	np->tx_ring = (struct w840_tx_desc*)&np->rx_ring[RX_RING_SIZE];

	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].length = np->rx_buf_sz;
		np->rx_ring[i].status = 0;
		np->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	np->rx_ring[i-1].length |= DescEndRing;

	/* Fill in the Rx buffers. Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev; /* Mark as being used by this device. */
		np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data,
					np->rx_buf_sz,PCI_DMA_FROMDEVICE);

		np->rx_ring[i].buffer1 = np->rx_addr[i];
		/* Hand the descriptor to the hardware last. */
		np->rx_ring[i].status = DescOwned;
	}

	np->cur_rx = 0;
	/* dirty_rx trails cur_rx by the number of unfilled slots (negative
	   wrap if some allocations failed above). */
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* Initialize the Tx descriptors */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].status = 0;
	}
	np->tx_full = 0;
	np->tx_q_bytes = np->dirty_tx = np->cur_tx = 0;

	iowrite32(np->ring_dma_addr, np->base_addr + RxRingPtr);
	iowrite32(np->ring_dma_addr+sizeof(struct w840_rx_desc)*RX_RING_SIZE,
		np->base_addr + TxRingPtr);

}
840
841static void free_rxtx_rings(struct netdev_private* np)
842{
843 int i;
844 /* Free all the skbuffs in the Rx queue. */
845 for (i = 0; i < RX_RING_SIZE; i++) {
846 np->rx_ring[i].status = 0;
847 if (np->rx_skbuff[i]) {
848 pci_unmap_single(np->pci_dev,
849 np->rx_addr[i],
850 np->rx_skbuff[i]->len,
851 PCI_DMA_FROMDEVICE);
852 dev_kfree_skb(np->rx_skbuff[i]);
853 }
854 np->rx_skbuff[i] = NULL;
855 }
856 for (i = 0; i < TX_RING_SIZE; i++) {
857 if (np->tx_skbuff[i]) {
858 pci_unmap_single(np->pci_dev,
859 np->tx_addr[i],
860 np->tx_skbuff[i]->len,
861 PCI_DMA_TODEVICE);
862 dev_kfree_skb(np->tx_skbuff[i]);
863 }
864 np->tx_skbuff[i] = NULL;
865 }
866}
867
/* Program the chip after a reset: station address, PCI bus configuration
   (endianness, burst/cache-alignment), CSR6 via update_csr6(), and the
   interrupt mask. Caller holds np->lock. */
static void init_registers(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int i;

	for (i = 0; i < 6; i++)
		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Initialize other registers. */
#ifdef __BIG_ENDIAN
	i = (1<<20); /* Big-endian descriptors */
#else
	i = 0;
#endif
	i |= (0x04<<2); /* skip length 4 u32 */
	i |= 0x02; /* give Rx priority */

	/* Configure the PCI bus bursts and FIFO thresholds.
	   486: Set 8 longword cache alignment, 8 longword burst.
	   586: Set 16 longword cache alignment, no burst limit.
	   Cache alignment bits 15:14 Burst length 13:8
	   0000 <not allowed> 0000 align to cache 0800 8 longwords
	   4000 8 longwords 0100 1 longword 1000 16 longwords
	   8000 16 longwords 0200 2 longwords 2000 32 longwords
	   C000 32 longwords 0400 4 longwords */

#if defined (__i386__) && !defined(MODULE)
	/* When not a module we can work around broken '486 PCI boards. */
	if (boot_cpu_data.x86 <= 4) {
		i |= 0x4800;
		printk(KERN_INFO "%s: This is a 386/486 PCI system, setting cache "
			   "alignment to 8 longwords.\n", dev->name);
	} else {
		i |= 0xE000;
	}
#elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
	i |= 0xE000;
#elif defined(__sparc__) || defined (CONFIG_PARISC)
	i |= 0x4800;
#else
#warning Processor architecture undefined
	i |= 0x4800;
#endif
	iowrite32(i, ioaddr + PCIBusCfg);

	np->csr6 = 0;
	/* 128 byte Tx threshold;
	   Transmit on; Receive on; */
	update_csr6(dev, 0x00022002 | update_link(dev) | __set_rx_mode(dev));

	/* Clear and Enable interrupts by setting the interrupt mask. */
	iowrite32(0x1A0F5, ioaddr + IntrStatus);
	iowrite32(0x1A0F5, ioaddr + IntrEnable);

	iowrite32(0, ioaddr + RxStartDemand);
}
925
/* Watchdog callback: the Tx path stalled for TX_TIMEOUT jiffies. Dump
   diagnostic state, then soft-reset the chip and rebuild the rings and
   registers from scratch (see comment below on why a full reset). */
static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
		   " resetting...\n", dev->name, ioread32(ioaddr + IntrStatus));

	{
		int i;
		printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(" %8.8x", (unsigned int)np->rx_ring[i].status);
		printk("\n"KERN_DEBUG" Tx ring %p: ", np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(" %8.8x", np->tx_ring[i].status);
		printk("\n");
	}
	printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d.\n",
		np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
	printk(KERN_DEBUG "Tx Descriptor addr %xh.\n",ioread32(ioaddr+0x4C));

	/* Keep the interrupt handler out while we tear everything down. */
	disable_irq(dev->irq);
	spin_lock_irq(&np->lock);
	/*
	 * Under high load dirty_tx and the internal tx descriptor pointer
	 * come out of sync, thus perform a software reset and reinitialize
	 * everything.
	 */

	iowrite32(1, np->base_addr+PCIBusCfg);
	udelay(1);

	free_rxtx_rings(np);
	init_rxtx_rings(dev);
	init_registers(dev);
	spin_unlock_irq(&np->lock);
	enable_irq(dev->irq);

	netif_wake_queue(dev);
	dev->trans_start = jiffies;
	np->stats.tx_errors++;
	return;
}
970
971/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
972static int alloc_ringdesc(struct net_device *dev)
973{
974 struct netdev_private *np = netdev_priv(dev);
975
976 np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
977
978 np->rx_ring = pci_alloc_consistent(np->pci_dev,
979 sizeof(struct w840_rx_desc)*RX_RING_SIZE +
980 sizeof(struct w840_tx_desc)*TX_RING_SIZE,
981 &np->ring_dma_addr);
982 if(!np->rx_ring)
983 return -ENOMEM;
984 init_rxtx_rings(dev);
985 return 0;
986}
987
988static void free_ringdesc(struct netdev_private *np)
989{
990 pci_free_consistent(np->pci_dev,
991 sizeof(struct w840_rx_desc)*RX_RING_SIZE +
992 sizeof(struct w840_tx_desc)*TX_RING_SIZE,
993 np->rx_ring, np->ring_dma_addr);
994
995}
996
997static int start_tx(struct sk_buff *skb, struct net_device *dev)
998{
999 struct netdev_private *np = netdev_priv(dev);
1000 unsigned entry;
1001
1002 /* Caution: the write order is important here, set the field
1003 with the "ownership" bits last. */
1004
1005 /* Calculate the next Tx descriptor entry. */
1006 entry = np->cur_tx % TX_RING_SIZE;
1007
1008 np->tx_addr[entry] = pci_map_single(np->pci_dev,
1009 skb->data,skb->len, PCI_DMA_TODEVICE);
1010 np->tx_skbuff[entry] = skb;
1011
1012 np->tx_ring[entry].buffer1 = np->tx_addr[entry];
1013 if (skb->len < TX_BUFLIMIT) {
1014 np->tx_ring[entry].length = DescWholePkt | skb->len;
1015 } else {
1016 int len = skb->len - TX_BUFLIMIT;
1017
1018 np->tx_ring[entry].buffer2 = np->tx_addr[entry]+TX_BUFLIMIT;
1019 np->tx_ring[entry].length = DescWholePkt | (len << 11) | TX_BUFLIMIT;
1020 }
1021 if(entry == TX_RING_SIZE-1)
1022 np->tx_ring[entry].length |= DescEndRing;
1023
1024 /* Now acquire the irq spinlock.
1025 * The difficult race is the the ordering between
42eab567 1026 * increasing np->cur_tx and setting DescOwned:
1da177e4
LT
1027 * - if np->cur_tx is increased first the interrupt
1028 * handler could consider the packet as transmitted
42eab567
GG
1029 * since DescOwned is cleared.
1030 * - If DescOwned is set first the NIC could report the
1da177e4
LT
1031 * packet as sent, but the interrupt handler would ignore it
1032 * since the np->cur_tx was not yet increased.
1033 */
1034 spin_lock_irq(&np->lock);
1035 np->cur_tx++;
1036
1037 wmb(); /* flush length, buffer1, buffer2 */
42eab567 1038 np->tx_ring[entry].status = DescOwned;
1da177e4
LT
1039 wmb(); /* flush status and kick the hardware */
1040 iowrite32(0, np->base_addr + TxStartDemand);
1041 np->tx_q_bytes += skb->len;
1042 /* Work around horrible bug in the chip by marking the queue as full
1043 when we do not have FIFO room for a maximum sized packet. */
1044 if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN ||
1045 ((np->drv_flags & HasBrokenTx) && np->tx_q_bytes > TX_BUG_FIFO_LIMIT)) {
1046 netif_stop_queue(dev);
1047 wmb();
1048 np->tx_full = 1;
1049 }
1050 spin_unlock_irq(&np->lock);
1051
1052 dev->trans_start = jiffies;
1053
1054 if (debug > 4) {
1055 printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
1056 dev->name, np->cur_tx, entry);
1057 }
1058 return 0;
1059}
1060
1061static void netdev_tx_done(struct net_device *dev)
1062{
1063 struct netdev_private *np = netdev_priv(dev);
1064 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1065 int entry = np->dirty_tx % TX_RING_SIZE;
1066 int tx_status = np->tx_ring[entry].status;
1067
1068 if (tx_status < 0)
1069 break;
1070 if (tx_status & 0x8000) { /* There was an error, log it. */
1071#ifndef final_version
1072 if (debug > 1)
1073 printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
1074 dev->name, tx_status);
1075#endif
1076 np->stats.tx_errors++;
1077 if (tx_status & 0x0104) np->stats.tx_aborted_errors++;
1078 if (tx_status & 0x0C80) np->stats.tx_carrier_errors++;
1079 if (tx_status & 0x0200) np->stats.tx_window_errors++;
1080 if (tx_status & 0x0002) np->stats.tx_fifo_errors++;
1081 if ((tx_status & 0x0080) && np->mii_if.full_duplex == 0)
1082 np->stats.tx_heartbeat_errors++;
1083 } else {
1084#ifndef final_version
1085 if (debug > 3)
1086 printk(KERN_DEBUG "%s: Transmit slot %d ok, Tx status %8.8x.\n",
1087 dev->name, entry, tx_status);
1088#endif
1089 np->stats.tx_bytes += np->tx_skbuff[entry]->len;
1090 np->stats.collisions += (tx_status >> 3) & 15;
1091 np->stats.tx_packets++;
1092 }
1093 /* Free the original skb. */
1094 pci_unmap_single(np->pci_dev,np->tx_addr[entry],
1095 np->tx_skbuff[entry]->len,
1096 PCI_DMA_TODEVICE);
1097 np->tx_q_bytes -= np->tx_skbuff[entry]->len;
1098 dev_kfree_skb_irq(np->tx_skbuff[entry]);
1099 np->tx_skbuff[entry] = NULL;
1100 }
1101 if (np->tx_full &&
1102 np->cur_tx - np->dirty_tx < TX_QUEUE_LEN_RESTART &&
1103 np->tx_q_bytes < TX_BUG_FIFO_LIMIT) {
1104 /* The ring is no longer full, clear tbusy. */
1105 np->tx_full = 0;
1106 wmb();
1107 netif_wake_queue(dev);
1108 }
1109}
1110
1111/* The interrupt handler does all of the Rx thread work and cleans up
1112 after the Tx thread. */
7d12e780 1113static irqreturn_t intr_handler(int irq, void *dev_instance)
1da177e4
LT
1114{
1115 struct net_device *dev = (struct net_device *)dev_instance;
1116 struct netdev_private *np = netdev_priv(dev);
1117 void __iomem *ioaddr = np->base_addr;
1118 int work_limit = max_interrupt_work;
1119 int handled = 0;
1120
1121 if (!netif_device_present(dev))
1122 return IRQ_NONE;
1123 do {
1124 u32 intr_status = ioread32(ioaddr + IntrStatus);
1125
1126 /* Acknowledge all of the current interrupt sources ASAP. */
1127 iowrite32(intr_status & 0x001ffff, ioaddr + IntrStatus);
1128
1129 if (debug > 4)
1130 printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
1131 dev->name, intr_status);
1132
1133 if ((intr_status & (NormalIntr|AbnormalIntr)) == 0)
1134 break;
1135
1136 handled = 1;
1137
42eab567 1138 if (intr_status & (RxIntr | RxNoBuf))
1da177e4
LT
1139 netdev_rx(dev);
1140 if (intr_status & RxNoBuf)
1141 iowrite32(0, ioaddr + RxStartDemand);
1142
42eab567 1143 if (intr_status & (TxNoBuf | TxIntr) &&
1da177e4
LT
1144 np->cur_tx != np->dirty_tx) {
1145 spin_lock(&np->lock);
1146 netdev_tx_done(dev);
1147 spin_unlock(&np->lock);
1148 }
1149
1150 /* Abnormal error summary/uncommon events handlers. */
42eab567
GG
1151 if (intr_status & (AbnormalIntr | TxFIFOUnderflow | SytemError |
1152 TimerInt | TxDied))
1da177e4
LT
1153 netdev_error(dev, intr_status);
1154
1155 if (--work_limit < 0) {
1156 printk(KERN_WARNING "%s: Too much work at interrupt, "
1157 "status=0x%4.4x.\n", dev->name, intr_status);
1158 /* Set the timer to re-enable the other interrupts after
1159 10*82usec ticks. */
1160 spin_lock(&np->lock);
1161 if (netif_device_present(dev)) {
1162 iowrite32(AbnormalIntr | TimerInt, ioaddr + IntrEnable);
1163 iowrite32(10, ioaddr + GPTimer);
1164 }
1165 spin_unlock(&np->lock);
1166 break;
1167 }
1168 } while (1);
1169
1170 if (debug > 3)
1171 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1172 dev->name, ioread32(ioaddr + IntrStatus));
1173 return IRQ_RETVAL(handled);
1174}
1175
1176/* This routine is logically part of the interrupt handler, but separated
1177 for clarity and better register allocation. */
1178static int netdev_rx(struct net_device *dev)
1179{
1180 struct netdev_private *np = netdev_priv(dev);
1181 int entry = np->cur_rx % RX_RING_SIZE;
1182 int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
1183
1184 if (debug > 4) {
1185 printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
1186 entry, np->rx_ring[entry].status);
1187 }
1188
1189 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1190 while (--work_limit >= 0) {
1191 struct w840_rx_desc *desc = np->rx_head_desc;
1192 s32 status = desc->status;
1193
1194 if (debug > 4)
1195 printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
1196 status);
1197 if (status < 0)
1198 break;
1199 if ((status & 0x38008300) != 0x0300) {
1200 if ((status & 0x38000300) != 0x0300) {
1201 /* Ingore earlier buffers. */
1202 if ((status & 0xffff) != 0x7fff) {
1203 printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
1204 "multiple buffers, entry %#x status %4.4x!\n",
1205 dev->name, np->cur_rx, status);
1206 np->stats.rx_length_errors++;
1207 }
1208 } else if (status & 0x8000) {
1209 /* There was a fatal error. */
1210 if (debug > 2)
1211 printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
1212 dev->name, status);
1213 np->stats.rx_errors++; /* end of a packet.*/
1214 if (status & 0x0890) np->stats.rx_length_errors++;
1215 if (status & 0x004C) np->stats.rx_frame_errors++;
1216 if (status & 0x0002) np->stats.rx_crc_errors++;
1217 }
1218 } else {
1219 struct sk_buff *skb;
1220 /* Omit the four octet CRC from the length. */
1221 int pkt_len = ((status >> 16) & 0x7ff) - 4;
1222
1223#ifndef final_version
1224 if (debug > 4)
1225 printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
1226 " status %x.\n", pkt_len, status);
1227#endif
1228 /* Check if the packet is long enough to accept without copying
1229 to a minimally-sized skbuff. */
1230 if (pkt_len < rx_copybreak
1231 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1232 skb->dev = dev;
1233 skb_reserve(skb, 2); /* 16 byte align the IP header */
1234 pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry],
1235 np->rx_skbuff[entry]->len,
1236 PCI_DMA_FROMDEVICE);
689be439 1237 eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0);
1da177e4
LT
1238 skb_put(skb, pkt_len);
1239 pci_dma_sync_single_for_device(np->pci_dev,np->rx_addr[entry],
1240 np->rx_skbuff[entry]->len,
1241 PCI_DMA_FROMDEVICE);
1242 } else {
1243 pci_unmap_single(np->pci_dev,np->rx_addr[entry],
1244 np->rx_skbuff[entry]->len,
1245 PCI_DMA_FROMDEVICE);
1246 skb_put(skb = np->rx_skbuff[entry], pkt_len);
1247 np->rx_skbuff[entry] = NULL;
1248 }
1249#ifndef final_version /* Remove after testing. */
1250 /* You will want this info for the initial debug. */
1251 if (debug > 5)
1252 printk(KERN_DEBUG " Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
1253 "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x "
1254 "%d.%d.%d.%d.\n",
1255 skb->data[0], skb->data[1], skb->data[2], skb->data[3],
1256 skb->data[4], skb->data[5], skb->data[6], skb->data[7],
1257 skb->data[8], skb->data[9], skb->data[10],
1258 skb->data[11], skb->data[12], skb->data[13],
1259 skb->data[14], skb->data[15], skb->data[16],
1260 skb->data[17]);
1261#endif
1262 skb->protocol = eth_type_trans(skb, dev);
1263 netif_rx(skb);
1264 dev->last_rx = jiffies;
1265 np->stats.rx_packets++;
1266 np->stats.rx_bytes += pkt_len;
1267 }
1268 entry = (++np->cur_rx) % RX_RING_SIZE;
1269 np->rx_head_desc = &np->rx_ring[entry];
1270 }
1271
1272 /* Refill the Rx ring buffers. */
1273 for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
1274 struct sk_buff *skb;
1275 entry = np->dirty_rx % RX_RING_SIZE;
1276 if (np->rx_skbuff[entry] == NULL) {
1277 skb = dev_alloc_skb(np->rx_buf_sz);
1278 np->rx_skbuff[entry] = skb;
1279 if (skb == NULL)
1280 break; /* Better luck next round. */
1281 skb->dev = dev; /* Mark as being used by this device. */
1282 np->rx_addr[entry] = pci_map_single(np->pci_dev,
689be439 1283 skb->data,
bb02aacc 1284 np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1da177e4
LT
1285 np->rx_ring[entry].buffer1 = np->rx_addr[entry];
1286 }
1287 wmb();
42eab567 1288 np->rx_ring[entry].status = DescOwned;
1da177e4
LT
1289 }
1290
1291 return 0;
1292}
1293
1294static void netdev_error(struct net_device *dev, int intr_status)
1295{
1296 struct netdev_private *np = netdev_priv(dev);
1297 void __iomem *ioaddr = np->base_addr;
1298
1299 if (debug > 2)
1300 printk(KERN_DEBUG "%s: Abnormal event, %8.8x.\n",
1301 dev->name, intr_status);
1302 if (intr_status == 0xffffffff)
1303 return;
1304 spin_lock(&np->lock);
1305 if (intr_status & TxFIFOUnderflow) {
1306 int new;
1307 /* Bump up the Tx threshold */
1308#if 0
1309 /* This causes lots of dropped packets,
1310 * and under high load even tx_timeouts
1311 */
1312 new = np->csr6 + 0x4000;
1313#else
1314 new = (np->csr6 >> 14)&0x7f;
1315 if (new < 64)
1316 new *= 2;
1317 else
1318 new = 127; /* load full packet before starting */
1319 new = (np->csr6 & ~(0x7F << 14)) | (new<<14);
1320#endif
1321 printk(KERN_DEBUG "%s: Tx underflow, new csr6 %8.8x.\n",
1322 dev->name, new);
1323 update_csr6(dev, new);
1324 }
42eab567 1325 if (intr_status & RxDied) { /* Missed a Rx frame. */
1da177e4
LT
1326 np->stats.rx_errors++;
1327 }
1328 if (intr_status & TimerInt) {
1329 /* Re-enable other interrupts. */
1330 if (netif_device_present(dev))
1331 iowrite32(0x1A0F5, ioaddr + IntrEnable);
1332 }
1333 np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
1334 iowrite32(0, ioaddr + RxStartDemand);
1335 spin_unlock(&np->lock);
1336}
1337
1338static struct net_device_stats *get_stats(struct net_device *dev)
1339{
1340 struct netdev_private *np = netdev_priv(dev);
1341 void __iomem *ioaddr = np->base_addr;
1342
1343 /* The chip only need report frame silently dropped. */
1344 spin_lock_irq(&np->lock);
1345 if (netif_running(dev) && netif_device_present(dev))
1346 np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
1347 spin_unlock_irq(&np->lock);
1348
1349 return &np->stats;
1350}
1351
1352
1353static u32 __set_rx_mode(struct net_device *dev)
1354{
1355 struct netdev_private *np = netdev_priv(dev);
1356 void __iomem *ioaddr = np->base_addr;
1357 u32 mc_filter[2]; /* Multicast hash filter */
1358 u32 rx_mode;
1359
1360 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1da177e4 1361 memset(mc_filter, 0xff, sizeof(mc_filter));
42eab567 1362 rx_mode = RxAcceptBroadcast | AcceptMulticast | RxAcceptAllPhys
1da177e4
LT
1363 | AcceptMyPhys;
1364 } else if ((dev->mc_count > multicast_filter_limit)
1365 || (dev->flags & IFF_ALLMULTI)) {
1366 /* Too many to match, or accept all multicasts. */
1367 memset(mc_filter, 0xff, sizeof(mc_filter));
42eab567 1368 rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1da177e4
LT
1369 } else {
1370 struct dev_mc_list *mclist;
1371 int i;
1372 memset(mc_filter, 0, sizeof(mc_filter));
1373 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1374 i++, mclist = mclist->next) {
1375 int filterbit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F;
1376 filterbit &= 0x3f;
1377 mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
1378 }
42eab567 1379 rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1da177e4
LT
1380 }
1381 iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
1382 iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
1383 return rx_mode;
1384}
1385
1386static void set_rx_mode(struct net_device *dev)
1387{
1388 struct netdev_private *np = netdev_priv(dev);
1389 u32 rx_mode = __set_rx_mode(dev);
1390 spin_lock_irq(&np->lock);
1391 update_csr6(dev, (np->csr6 & ~0x00F8) | rx_mode);
1392 spin_unlock_irq(&np->lock);
1393}
1394
1395static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1396{
1397 struct netdev_private *np = netdev_priv(dev);
1398
1399 strcpy (info->driver, DRV_NAME);
1400 strcpy (info->version, DRV_VERSION);
1401 strcpy (info->bus_info, pci_name(np->pci_dev));
1402}
1403
1404static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1405{
1406 struct netdev_private *np = netdev_priv(dev);
1407 int rc;
1408
1409 spin_lock_irq(&np->lock);
1410 rc = mii_ethtool_gset(&np->mii_if, cmd);
1411 spin_unlock_irq(&np->lock);
1412
1413 return rc;
1414}
1415
1416static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1417{
1418 struct netdev_private *np = netdev_priv(dev);
1419 int rc;
1420
1421 spin_lock_irq(&np->lock);
1422 rc = mii_ethtool_sset(&np->mii_if, cmd);
1423 spin_unlock_irq(&np->lock);
1424
1425 return rc;
1426}
1427
1428static int netdev_nway_reset(struct net_device *dev)
1429{
1430 struct netdev_private *np = netdev_priv(dev);
1431 return mii_nway_restart(&np->mii_if);
1432}
1433
1434static u32 netdev_get_link(struct net_device *dev)
1435{
1436 struct netdev_private *np = netdev_priv(dev);
1437 return mii_link_ok(&np->mii_if);
1438}
1439
1440static u32 netdev_get_msglevel(struct net_device *dev)
1441{
1442 return debug;
1443}
1444
1445static void netdev_set_msglevel(struct net_device *dev, u32 value)
1446{
1447 debug = value;
1448}
1449
7282d491 1450static const struct ethtool_ops netdev_ethtool_ops = {
1da177e4
LT
1451 .get_drvinfo = netdev_get_drvinfo,
1452 .get_settings = netdev_get_settings,
1453 .set_settings = netdev_set_settings,
1454 .nway_reset = netdev_nway_reset,
1455 .get_link = netdev_get_link,
1456 .get_msglevel = netdev_get_msglevel,
1457 .set_msglevel = netdev_set_msglevel,
1458 .get_sg = ethtool_op_get_sg,
1459 .get_tx_csum = ethtool_op_get_tx_csum,
1460};
1461
1462static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1463{
1464 struct mii_ioctl_data *data = if_mii(rq);
1465 struct netdev_private *np = netdev_priv(dev);
1466
1467 switch(cmd) {
1468 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
1469 data->phy_id = ((struct netdev_private *)netdev_priv(dev))->phys[0] & 0x1f;
1470 /* Fall Through */
1471
1472 case SIOCGMIIREG: /* Read MII PHY register. */
1473 spin_lock_irq(&np->lock);
1474 data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
1475 spin_unlock_irq(&np->lock);
1476 return 0;
1477
1478 case SIOCSMIIREG: /* Write MII PHY register. */
1479 if (!capable(CAP_NET_ADMIN))
1480 return -EPERM;
1481 spin_lock_irq(&np->lock);
1482 mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
1483 spin_unlock_irq(&np->lock);
1484 return 0;
1485 default:
1486 return -EOPNOTSUPP;
1487 }
1488}
1489
1490static int netdev_close(struct net_device *dev)
1491{
1492 struct netdev_private *np = netdev_priv(dev);
1493 void __iomem *ioaddr = np->base_addr;
1494
1495 netif_stop_queue(dev);
1496
1497 if (debug > 1) {
1498 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %8.8x "
1499 "Config %8.8x.\n", dev->name, ioread32(ioaddr + IntrStatus),
1500 ioread32(ioaddr + NetworkConfig));
1501 printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
1502 dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1503 }
1504
1505 /* Stop the chip's Tx and Rx processes. */
1506 spin_lock_irq(&np->lock);
1507 netif_device_detach(dev);
1508 update_csr6(dev, 0);
1509 iowrite32(0x0000, ioaddr + IntrEnable);
1510 spin_unlock_irq(&np->lock);
1511
1512 free_irq(dev->irq, dev);
1513 wmb();
1514 netif_device_attach(dev);
1515
1516 if (ioread32(ioaddr + NetworkConfig) != 0xffffffff)
1517 np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
1518
1519#ifdef __i386__
1520 if (debug > 2) {
1521 int i;
1522
1523 printk(KERN_DEBUG" Tx ring at %8.8x:\n",
1524 (int)np->tx_ring);
1525 for (i = 0; i < TX_RING_SIZE; i++)
1526 printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x.\n",
1527 i, np->tx_ring[i].length,
1528 np->tx_ring[i].status, np->tx_ring[i].buffer1);
1529 printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
1530 (int)np->rx_ring);
1531 for (i = 0; i < RX_RING_SIZE; i++) {
1532 printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
1533 i, np->rx_ring[i].length,
1534 np->rx_ring[i].status, np->rx_ring[i].buffer1);
1535 }
1536 }
1537#endif /* __i386__ debugging only */
1538
1539 del_timer_sync(&np->timer);
1540
1541 free_rxtx_rings(np);
1542 free_ringdesc(np);
1543
1544 return 0;
1545}
1546
1547static void __devexit w840_remove1 (struct pci_dev *pdev)
1548{
1549 struct net_device *dev = pci_get_drvdata(pdev);
f3b197ac 1550
1da177e4
LT
1551 if (dev) {
1552 struct netdev_private *np = netdev_priv(dev);
1553 unregister_netdev(dev);
1554 pci_release_regions(pdev);
1555 pci_iounmap(pdev, np->base_addr);
1556 free_netdev(dev);
1557 }
1558
1559 pci_set_drvdata(pdev, NULL);
1560}
1561
1562#ifdef CONFIG_PM
1563
/*
 * suspend/resume synchronization:
 * - open, close, do_ioctl:
 *	rtnl_lock, & netif_device_detach after the rtnl_unlock.
 * - get_stats:
 *	spin_lock_irq(np->lock), doesn't touch hw if not present
 * - hard_start_xmit:
 *	synchronize_irq + netif_tx_disable;
 * - tx_timeout:
 *	netif_device_detach + netif_tx_disable;
 * - set_multicast_list
 *	netif_device_detach + netif_tx_disable;
 * - interrupt handler
 *	doesn't touch hw if not present, synchronize_irq waits for
 *	running instances of the interrupt handler.
 *
 * Disabling hw requires clearing csr6 & IntrEnable.
 * update_csr6 & all functions that write IntrEnable check
 * netif_device_present before setting any bits.
 *
 * Detach must occur under spin_unlock_irq(), interrupts from a detached
 * device would cause an irq storm.
 */
05adc3b7 1587static int w840_suspend (struct pci_dev *pdev, pm_message_t state)
1da177e4
LT
1588{
1589 struct net_device *dev = pci_get_drvdata (pdev);
1590 struct netdev_private *np = netdev_priv(dev);
1591 void __iomem *ioaddr = np->base_addr;
1592
1593 rtnl_lock();
1594 if (netif_running (dev)) {
1595 del_timer_sync(&np->timer);
1596
1597 spin_lock_irq(&np->lock);
1598 netif_device_detach(dev);
1599 update_csr6(dev, 0);
1600 iowrite32(0, ioaddr + IntrEnable);
1da177e4
LT
1601 spin_unlock_irq(&np->lock);
1602
1da177e4 1603 synchronize_irq(dev->irq);
932ff279 1604 netif_tx_disable(dev);
6aa20a22 1605
1da177e4
LT
1606 np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
1607
1608 /* no more hardware accesses behind this line. */
1609
cca4aa83 1610 BUG_ON(np->csr6);
1da177e4
LT
1611 if (ioread32(ioaddr + IntrEnable)) BUG();
1612
1613 /* pci_power_off(pdev, -1); */
1614
1615 free_rxtx_rings(np);
1616 } else {
1617 netif_device_detach(dev);
1618 }
1619 rtnl_unlock();
1620 return 0;
1621}
1622
1623static int w840_resume (struct pci_dev *pdev)
1624{
1625 struct net_device *dev = pci_get_drvdata (pdev);
1626 struct netdev_private *np = netdev_priv(dev);
9f486ae1 1627 int retval = 0;
1da177e4
LT
1628
1629 rtnl_lock();
1630 if (netif_device_present(dev))
1631 goto out; /* device not suspended */
1632 if (netif_running(dev)) {
9f486ae1
VH
1633 if ((retval = pci_enable_device(pdev))) {
1634 printk (KERN_ERR
1635 "%s: pci_enable_device failed in resume\n",
1636 dev->name);
1637 goto out;
1638 }
1da177e4
LT
1639 spin_lock_irq(&np->lock);
1640 iowrite32(1, np->base_addr+PCIBusCfg);
1641 ioread32(np->base_addr+PCIBusCfg);
1642 udelay(1);
1643 netif_device_attach(dev);
1644 init_rxtx_rings(dev);
1645 init_registers(dev);
1646 spin_unlock_irq(&np->lock);
1647
1648 netif_wake_queue(dev);
1649
1650 mod_timer(&np->timer, jiffies + 1*HZ);
1651 } else {
1652 netif_device_attach(dev);
1653 }
1654out:
1655 rtnl_unlock();
9f486ae1 1656 return retval;
1da177e4
LT
1657}
1658#endif
1659
1660static struct pci_driver w840_driver = {
1661 .name = DRV_NAME,
1662 .id_table = w840_pci_tbl,
1663 .probe = w840_probe1,
1664 .remove = __devexit_p(w840_remove1),
1665#ifdef CONFIG_PM
1666 .suspend = w840_suspend,
1667 .resume = w840_resume,
1668#endif
1669};
1670
1671static int __init w840_init(void)
1672{
1673 printk(version);
29917620 1674 return pci_register_driver(&w840_driver);
1da177e4
LT
1675}
1676
1677static void __exit w840_exit(void)
1678{
1679 pci_unregister_driver(&w840_driver);
1680}
1681
1682module_init(w840_init);
1683module_exit(w840_exit);