/* winbond-840.c: A Linux PCI network adapter device driver. */
/*
	Written 1998-2001 by Donald Becker.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Support and updates available at
	http://www.scyld.com/network/drivers.html

	Do not remove the copyright information.
	Do not change the version information unless an improvement has been made.
	Merely removing my name, as Compex has done in the past, does not count
	as an improvement.

	Changelog:
	* ported to 2.4
		???
	* spin lock update, memory barriers, new style dma mappings
		limit each tx buffer to < 1024 bytes
		remove DescIntr from Rx descriptors (that's a Tx flag)
		remove next pointer from Tx descriptors
		synchronize tx_q_bytes
		software reset in tx_timeout
			Copyright (C) 2000 Manfred Spraul
	* further cleanups
		power management.
		support for big endian descriptors
			Copyright (C) 2001 Manfred Spraul
	* ethtool support (jgarzik)
	* Replace some MII-related magic numbers with constants (jgarzik)

	TODO:
	* enable pci_power_off
	* Wake-On-LAN
*/

#define DRV_NAME	"winbond-840"
#define DRV_VERSION	"1.01-d"
#define DRV_RELDATE	"Nov-17-2001"


/* Automatically extracted configuration info:
probe-func: winbond840_probe
config-in: tristate 'Winbond W89c840 Ethernet support' CONFIG_WINBOND_840

c-help-name: Winbond W89c840 PCI Ethernet support
c-help-symbol: CONFIG_WINBOND_840
c-help: This driver is for the Winbond W89c840 chip.  It also works with
c-help: the TX9882 chip on the Compex RL100-ATX board.
c-help: More specific information and updates are available from
c-help: http://www.scyld.com/network/drivers.html
*/

/* The user-configurable values.
   These may be modified when a driver module is loaded.*/

static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
   The '840 uses a 64 element hash table based on the Ethernet CRC.  */
static int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;
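/* Illustrative example: loading with "modprobe winbond-840 rx_copybreak=200"
   makes netdev_rx() below copy frames shorter than 200 bytes into a freshly
   allocated skb and leave the large ring buffer in place for reuse. */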

/* Used to pass the media type, etc.
   Both 'options[]' and 'full_duplex[]' should exist for driver
   interoperability.
   The media type is usually passed in 'options[]'.
*/
#define MAX_UNITS 8		/* More are supported, limit only on options */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
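/* Illustrative example: "modprobe winbond-840 full_duplex=1,1" forces the
   first two cards to full duplex; w840_probe1() below also honours bit
   0x200 of the per-card options[] word as a full-duplex request. */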

/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE	16
#define TX_QUEUE_LEN	10		/* Limit ring entries actually used.  */
#define TX_QUEUE_LEN_RESTART	5
#define RX_RING_SIZE	32

#define TX_BUFLIMIT	(1024-128)

/* The presumed FIFO size for working around the Tx-FIFO-overflow bug.
   To avoid overflowing we don't queue again until we have room for a
   full-size packet.
 */
#define TX_FIFO_SIZE (2048)
#define TX_BUG_FIFO_LIMIT (TX_FIFO_SIZE-1514-16)
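/* i.e. 2048 - 1514 - 16 = 518 bytes: once more than this is outstanding the
   driver stops the queue, so one maximum-size frame always still fits in
   the FIFO (see start_tx()). */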


/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)

#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer.*/

/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/rtnetlink.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/irq.h>

/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
KERN_INFO DRV_NAME ".c:v" DRV_VERSION " (2.4 port) " DRV_RELDATE "  Donald Becker <becker@scyld.com>\n"
KERN_INFO "  http://www.scyld.com/network/drivers.html\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Winbond W89c840 Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(multicast_filter_limit, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(max_interrupt_work, "winbond-840 maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "winbond-840 debug level (0-6)");
MODULE_PARM_DESC(rx_copybreak, "winbond-840 copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(multicast_filter_limit, "winbond-840 maximum number of filtered multicast addresses");
MODULE_PARM_DESC(options, "winbond-840: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "winbond-840 full duplex setting(s) (1)");

/*
				Theory of Operation

I. Board Compatibility

This driver is for the Winbond w89c840 chip.

II. Board-specific settings

None.

III. Driver operation

This chip is very similar to the Digital 21*4* "Tulip" family.  The first
twelve registers and the descriptor format are nearly identical.  Read a
Tulip manual for operational details.

A significant difference is that the multicast filter and station address are
stored in registers rather than loaded through a pseudo-transmit packet.

Unlike the Tulip, transmit buffers are limited to 1KB.  To transmit a
full-sized packet we must use both data buffers in a descriptor.  Thus the
driver uses ring mode where descriptors are implicitly sequential in memory,
rather than using the second descriptor address as a chain pointer to
subsequent descriptors.
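
For example (illustrative numbers, taken from start_tx() and TX_BUFLIMIT
below): a 1514 byte frame is mapped once and described as buffer1 = dma,
896 bytes, and buffer2 = dma + 896, 618 bytes, with the descriptor length
field set to DescWholePkt | (618 << 11) | 896.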

IV. Notes

If you are going to almost clone a Tulip, why not go all the way and avoid
the need for a new driver?

IVb. References

http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html
http://www.winbond.com.tw/

IVc. Errata

A horrible bug exists in the transmit FIFO.  Apparently the chip doesn't
correctly detect a full FIFO, and queuing more than 2048 bytes may result in
silent data corruption.

Test with 'ping -s 10000' on a fast computer.

*/

\f

/*
  PCI probe table.
*/
enum pci_id_flags_bits {
	/* Set PCI command register bits before calling probe1().  */
	PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
	/* Read and map the single following PCI BAR. */
	PCI_ADDR0=0<<4, PCI_ADDR1=1<<4, PCI_ADDR2=2<<4, PCI_ADDR3=3<<4,
	PCI_ADDR_64BITS=0x100, PCI_NO_ACPI_WAKE=0x200, PCI_NO_MIN_LATENCY=0x400,
};
enum chip_capability_flags {
	CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,};
#ifdef USE_IO_OPS
#define W840_FLAGS (PCI_USES_IO | PCI_ADDR0 | PCI_USES_MASTER)
#else
#define W840_FLAGS (PCI_USES_MEM | PCI_ADDR1 | PCI_USES_MASTER)
#endif

static struct pci_device_id w840_pci_tbl[] = {
	{ 0x1050, 0x0840, PCI_ANY_ID, 0x8153,     0, 0, 0 },
	{ 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
	{ 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, w840_pci_tbl);

struct pci_id_info {
	const char *name;
	struct match_info {
		int pci, pci_mask, subsystem, subsystem_mask;
		int revision, revision_mask;		/* Only 8 bits. */
	} id;
	enum pci_id_flags_bits pci_flags;
	int io_size;			/* Needed for I/O region check or ioremap(). */
	int drv_flags;			/* Driver use, intended as capability flags. */
};
static struct pci_id_info pci_id_tbl[] = {
	{"Winbond W89c840",		/* Sometimes a Level-One switch card. */
	 { 0x08401050, 0xffffffff, 0x81530000, 0xffff0000 },
	 W840_FLAGS, 128, CanHaveMII | HasBrokenTx | FDXOnNoMII},
	{"Winbond W89c840", { 0x08401050, 0xffffffff, },
	 W840_FLAGS, 128, CanHaveMII | HasBrokenTx},
	{"Compex RL100-ATX", { 0x201111F6, 0xffffffff,},
	 W840_FLAGS, 128, CanHaveMII | HasBrokenTx},
	{NULL,},			/* 0 terminated list. */
};

/* This driver was written to use PCI memory space, however some x86 systems
   work only with I/O space accesses.  Pass -DUSE_IO_OPS to use PCI I/O space
   accesses instead of memory space. */

/* Offsets to the Command and Status Registers, "CSRs".
   While similar to the Tulip, these registers are longword aligned.
   Note: It's not useful to define symbolic names for every register bit in
   the device.  The name can only partially document the semantics and make
   the driver longer and more difficult to read.
*/
enum w840_offsets {
	PCIBusCfg=0x00, TxStartDemand=0x04, RxStartDemand=0x08,
	RxRingPtr=0x0C, TxRingPtr=0x10,
	IntrStatus=0x14, NetworkConfig=0x18, IntrEnable=0x1C,
	RxMissed=0x20, EECtrl=0x24, MIICtrl=0x24, BootRom=0x28, GPTimer=0x2C,
	CurRxDescAddr=0x30, CurRxBufAddr=0x34,	/* Debug use */
	MulticastFilter0=0x38, MulticastFilter1=0x3C, StationAddr=0x40,
	CurTxDescAddr=0x4C, CurTxBufAddr=0x50,
};

/* Bits in the interrupt status/enable registers. */
/* The bits in the Intr Status/Enable registers, mostly interrupt sources. */
enum intr_status_bits {
	NormalIntr=0x10000, AbnormalIntr=0x8000,
	IntrPCIErr=0x2000, TimerInt=0x800,
	IntrRxDied=0x100, RxNoBuf=0x80, IntrRxDone=0x40,
	TxFIFOUnderflow=0x20, RxErrIntr=0x10,
	TxIdle=0x04, IntrTxStopped=0x02, IntrTxDone=0x01,
};

/* Bits in the NetworkConfig register. */
enum rx_mode_bits {
	AcceptErr=0x80, AcceptRunt=0x40,
	AcceptBroadcast=0x20, AcceptMulticast=0x10,
	AcceptAllPhys=0x08, AcceptMyPhys=0x02,
};

enum mii_reg_bits {
	MDIO_ShiftClk=0x10000, MDIO_DataIn=0x80000, MDIO_DataOut=0x20000,
	MDIO_EnbOutput=0x40000, MDIO_EnbIn = 0x00000,
};

/* The Tulip Rx and Tx buffer descriptors. */
struct w840_rx_desc {
	s32 status;
	s32 length;
	u32 buffer1;
	u32 buffer2;
};

struct w840_tx_desc {
	s32 status;
	s32 length;
	u32 buffer1, buffer2;
};

/* Bits in network_desc.status */
enum desc_status_bits {
	DescOwn=0x80000000, DescEndRing=0x02000000, DescUseLink=0x01000000,
	DescWholePkt=0x60000000, DescStartPkt=0x20000000, DescEndPkt=0x40000000,
	DescIntr=0x80000000,
};

#define MII_CNT		1 /* winbond only supports one MII */
struct netdev_private {
	struct w840_rx_desc *rx_ring;
	dma_addr_t	rx_addr[RX_RING_SIZE];
	struct w840_tx_desc *tx_ring;
	dma_addr_t	tx_addr[TX_RING_SIZE];
	dma_addr_t ring_dma_addr;
	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	/* The saved address of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	struct net_device_stats stats;
	struct timer_list timer;	/* Media monitoring timer. */
	/* Frequently used values: keep some adjacent for cache effect. */
	spinlock_t lock;
	int chip_id, drv_flags;
	struct pci_dev *pci_dev;
	int csr6;
	struct w840_rx_desc *rx_head_desc;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	unsigned int cur_tx, dirty_tx;
	unsigned int tx_q_bytes;
	unsigned int tx_full;		/* The Tx queue is full. */
	/* MII transceiver section. */
	int mii_cnt;			/* Number of MII transceivers found. */
	unsigned char phys[MII_CNT];	/* MII device addresses, but only the first is used */
	u32 mii;
	struct mii_if_info mii_if;
	void __iomem *base_addr;
};

static int  eeprom_read(void __iomem *ioaddr, int location);
static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  netdev_open(struct net_device *dev);
static int  update_link(struct net_device *dev);
static void netdev_timer(unsigned long data);
static void init_rxtx_rings(struct net_device *dev);
static void free_rxtx_rings(struct netdev_private *np);
static void init_registers(struct net_device *dev);
static void tx_timeout(struct net_device *dev);
static int  alloc_ringdesc(struct net_device *dev);
static void free_ringdesc(struct netdev_private *np);
static int  start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
static void netdev_error(struct net_device *dev, int intr_status);
static int  netdev_rx(struct net_device *dev);
static u32 __set_rx_mode(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static struct ethtool_ops netdev_ethtool_ops;
static int  netdev_close(struct net_device *dev);

\f

static int __devinit w840_probe1 (struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	static int find_cnt;
	int chip_idx = ent->driver_data;
	int irq;
	int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
	void __iomem *ioaddr;
	int bar = 1;

	i = pci_enable_device(pdev);
	if (i) return i;

	pci_set_master(pdev);

	irq = pdev->irq;

	if (pci_set_dma_mask(pdev,0xFFFFffff)) {
		printk(KERN_WARNING "Winbond-840: Device %s disabled due to DMA limitations.\n",
		       pci_name(pdev));
		return -EIO;
	}
	dev = alloc_etherdev(sizeof(*np));
	if (!dev)
		return -ENOMEM;
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	if (pci_request_regions(pdev, DRV_NAME))
		goto err_out_netdev;
#ifdef USE_IO_OPS
	bar = 0;
#endif
	ioaddr = pci_iomap(pdev, bar, pci_id_tbl[chip_idx].io_size);
	if (!ioaddr)
		goto err_out_free_res;

	for (i = 0; i < 3; i++)
		((u16 *)dev->dev_addr)[i] = le16_to_cpu(eeprom_read(ioaddr, i));

	/* Reset the chip to erase previous misconfiguration.
	   No hold time required! */
	iowrite32(0x00000001, ioaddr + PCIBusCfg);

	dev->base_addr = (unsigned long)ioaddr;
	dev->irq = irq;

	np = netdev_priv(dev);
	np->pci_dev = pdev;
	np->chip_id = chip_idx;
	np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
	spin_lock_init(&np->lock);
	np->mii_if.dev = dev;
	np->mii_if.mdio_read = mdio_read;
	np->mii_if.mdio_write = mdio_write;
	np->base_addr = ioaddr;

	pci_set_drvdata(pdev, dev);

	if (dev->mem_start)
		option = dev->mem_start;

	/* The lower four bits are the media type. */
	if (option > 0) {
		if (option & 0x200)
			np->mii_if.full_duplex = 1;
		if (option & 15)
			printk(KERN_INFO "%s: ignoring user supplied media type %d",
			       dev->name, option & 15);
	}
	if (find_cnt < MAX_UNITS  &&  full_duplex[find_cnt] > 0)
		np->mii_if.full_duplex = 1;

	if (np->mii_if.full_duplex)
		np->mii_if.force_media = 1;

	/* The chip-specific entries in the device structure. */
	dev->open = &netdev_open;
	dev->hard_start_xmit = &start_tx;
	dev->stop = &netdev_close;
	dev->get_stats = &get_stats;
	dev->set_multicast_list = &set_rx_mode;
	dev->do_ioctl = &netdev_ioctl;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->tx_timeout = &tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;

	i = register_netdev(dev);
	if (i)
		goto err_out_cleardev;

	printk(KERN_INFO "%s: %s at %p, ",
	       dev->name, pci_id_tbl[chip_idx].name, ioaddr);
	for (i = 0; i < 5; i++)
		printk("%2.2x:", dev->dev_addr[i]);
	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);

	if (np->drv_flags & CanHaveMII) {
		int phy, phy_idx = 0;
		for (phy = 1; phy < 32 && phy_idx < MII_CNT; phy++) {
			int mii_status = mdio_read(dev, phy, MII_BMSR);
			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
				np->phys[phy_idx++] = phy;
				np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
				np->mii = (mdio_read(dev, phy, MII_PHYSID1) << 16)+
						mdio_read(dev, phy, MII_PHYSID2);
				printk(KERN_INFO "%s: MII PHY %8.8xh found at address %d, status "
				       "0x%4.4x advertising %4.4x.\n",
				       dev->name, np->mii, phy, mii_status, np->mii_if.advertising);
			}
		}
		np->mii_cnt = phy_idx;
		np->mii_if.phy_id = np->phys[0];
		if (phy_idx == 0) {
			printk(KERN_WARNING "%s: MII PHY not found -- this device may "
			       "not operate correctly.\n", dev->name);
		}
	}

	find_cnt++;
	return 0;

err_out_cleardev:
	pci_set_drvdata(pdev, NULL);
	pci_iounmap(pdev, ioaddr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_netdev:
	free_netdev (dev);
	return -ENODEV;
}

\f
/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.  These are
   often serial bit streams generated by the host processor.
   The example below is for the common 93c46 EEPROM, 64 16 bit words. */

/* Delay between EEPROM clock transitions.
   No extra delay is needed with 33 MHz PCI, but future 66 MHz access may need
   a delay.  Note that pre-2.0.34 kernels had a cache-alignment bug that
   made udelay() unreliable.
   The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
   deprecated.
*/
#define eeprom_delay(ee_addr)	ioread32(ee_addr)

enum EEPROM_Ctrl_Bits {
	EE_ShiftClk=0x02, EE_Write0=0x801, EE_Write1=0x805,
	EE_ChipSelect=0x801, EE_DataIn=0x08,
};

/* The EEPROM commands include the always-set leading bit. */
enum EEPROM_Cmds {
	EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
};
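/* Worked example (illustrative): reading word 5 shifts out EE_ReadCmd | 5 =
   0x185 over the eleven data clocks in eeprom_read() below, i.e. the bits
   0 0 1 1 0 0 0 0 1 0 1 -- two leading zeros the part ignores while waiting
   for the start bit, then the start bit, the 10 read opcode and the 6-bit
   word address. */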

static int eeprom_read(void __iomem *addr, int location)
{
	int i;
	int retval = 0;
	void __iomem *ee_addr = addr + EECtrl;
	int read_cmd = location | EE_ReadCmd;
	iowrite32(EE_ChipSelect, ee_addr);

	/* Shift the read command bits out. */
	for (i = 10; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
		iowrite32(dataval, ee_addr);
		eeprom_delay(ee_addr);
		iowrite32(dataval | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
	}
	iowrite32(EE_ChipSelect, ee_addr);
	eeprom_delay(ee_addr);

	for (i = 16; i > 0; i--) {
		iowrite32(EE_ChipSelect | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
		retval = (retval << 1) | ((ioread32(ee_addr) & EE_DataIn) ? 1 : 0);
		iowrite32(EE_ChipSelect, ee_addr);
		eeprom_delay(ee_addr);
	}

	/* Terminate the EEPROM access. */
	iowrite32(0, ee_addr);
	return retval;
}

/*  MII transceiver control section.
	Read and write the MII registers using software-generated serial
	MDIO protocol.  See the MII specifications or DP83840A data sheet
	for details.

	The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
	met by back-to-back 33 MHz PCI cycles. */
#define mdio_delay(mdio_addr) ioread32(mdio_addr)

/* Set iff a MII transceiver on any interface requires mdio preamble.
   This is only set for older transceivers, so the extra
   code size of a per-interface flag is not worthwhile. */
static char mii_preamble_required = 1;

#define MDIO_WRITE0 (MDIO_EnbOutput)
#define MDIO_WRITE1 (MDIO_DataOut | MDIO_EnbOutput)
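/* Frame layout note (derived from the constants used below): mdio_read()
   shifts out the low 16 bits of (0xf6 << 10) | (phy_id << 5) | location,
   i.e. 11 0110 ppppp rrrrr -- two extra preamble ones, the 01 start code,
   the 10 read opcode, then the 5-bit PHY and register addresses.
   mdio_write() similarly sends 32 bits whose top nibble encodes start 01
   and write opcode 01, with the 10 turnaround ahead of the 16 data bits. */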

/* Generate the preamble required for initial synchronization and
   a few older transceivers. */
static void mdio_sync(void __iomem *mdio_addr)
{
	int bits = 32;

	/* Establish sync by sending at least 32 logic ones. */
	while (--bits >= 0) {
		iowrite32(MDIO_WRITE1, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
}

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base_addr + MIICtrl;
	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
	int i, retval = 0;

	if (mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the read command bits out. */
	for (i = 15; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite32(dataval, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	/* Read the two transition, 16 data, and wire-idle bits. */
	for (i = 20; i > 0; i--) {
		iowrite32(MDIO_EnbIn, mdio_addr);
		mdio_delay(mdio_addr);
		retval = (retval << 1) | ((ioread32(mdio_addr) & MDIO_DataIn) ? 1 : 0);
		iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	return (retval>>1) & 0xffff;
}

static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base_addr + MIICtrl;
	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
	int i;

	if (location == 4  &&  phy_id == np->phys[0])
		np->mii_if.advertising = value;

	if (mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the command bits out. */
	for (i = 31; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite32(dataval, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	/* Clear out extra bits. */
	for (i = 2; i > 0; i--) {
		iowrite32(MDIO_EnbIn, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	return;
}

\f
static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int i;

	iowrite32(0x00000001, ioaddr + PCIBusCfg);	/* Reset */

	netif_device_detach(dev);
	i = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
	if (i)
		goto out_err;

	if (debug > 1)
		printk(KERN_DEBUG "%s: w89c840_open() irq %d.\n",
		       dev->name, dev->irq);

	if((i=alloc_ringdesc(dev)))
		goto out_err;

	spin_lock_irq(&np->lock);
	netif_device_attach(dev);
	init_registers(dev);
	spin_unlock_irq(&np->lock);

	netif_start_queue(dev);
	if (debug > 2)
		printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name);

	/* Set the timer to check for link beat. */
	init_timer(&np->timer);
	np->timer.expires = jiffies + 1*HZ;
	np->timer.data = (unsigned long)dev;
	np->timer.function = &netdev_timer;	/* timer handler */
	add_timer(&np->timer);
	return 0;
out_err:
	netif_device_attach(dev);
	return i;
}

#define MII_DAVICOM_DM9101	0x0181b800

static int update_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int duplex, fasteth, result, mii_reg;

	/* BMSR */
	mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);

	if (mii_reg == 0xffff)
		return np->csr6;
	/* reread: the link status bit is sticky */
	mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
	if (!(mii_reg & 0x4)) {
		if (netif_carrier_ok(dev)) {
			if (debug)
				printk(KERN_INFO "%s: MII #%d reports no link. Disabling watchdog.\n",
				       dev->name, np->phys[0]);
			netif_carrier_off(dev);
		}
		return np->csr6;
	}
	if (!netif_carrier_ok(dev)) {
		if (debug)
			printk(KERN_INFO "%s: MII #%d link is back. Enabling watchdog.\n",
			       dev->name, np->phys[0]);
		netif_carrier_on(dev);
	}

	if ((np->mii & ~0xf) == MII_DAVICOM_DM9101) {
		/* If the link partner doesn't support autonegotiation
		 * the MII detects its abilities with "parallel detection".
		 * Some MIIs update the LPA register to the result of the parallel
		 * detection, some don't.
		 * The Davicom PHY [at least 0181b800] doesn't.
		 * Instead bits 8 and 13 of the BMCR are updated to the result
		 * of the negotiation.
		 */
		mii_reg = mdio_read(dev, np->phys[0], MII_BMCR);
		duplex = mii_reg & BMCR_FULLDPLX;
		fasteth = mii_reg & BMCR_SPEED100;
	} else {
		int negotiated;
		mii_reg	= mdio_read(dev, np->phys[0], MII_LPA);
		negotiated = mii_reg & np->mii_if.advertising;

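		/* 0x380 = LPA_100BASE4 | LPA_100FULL | LPA_100HALF; 0x02C0 =
		   LPA_100BASE4 | LPA_100HALF | LPA_10FULL, so a 10 Mbit link
		   counts as full duplex only when 10FULL matched and no
		   faster half-duplex mode did. */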
		duplex = (negotiated & LPA_100FULL) || ((negotiated & 0x02C0) == LPA_10FULL);
		fasteth = negotiated & 0x380;
	}
	duplex |= np->mii_if.force_media;
	/* clear the fast-ethernet and full-duplex bits */
	result = np->csr6 & ~0x20000200;
	if (duplex)
		result |= 0x200;
	if (fasteth)
		result |= 0x20000000;
	if (result != np->csr6 && debug)
		printk(KERN_INFO "%s: Setting %dMBit-%s-duplex based on MII#%d\n",
		       dev->name, fasteth ? 100 : 10,
		       duplex ? "full" : "half", np->phys[0]);
	return result;
}

#define RXTX_TIMEOUT	2000
static inline void update_csr6(struct net_device *dev, int new)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int limit = RXTX_TIMEOUT;

	if (!netif_device_present(dev))
		new = 0;
	if (new==np->csr6)
		return;
	/* stop both Tx and Rx processes */
	iowrite32(np->csr6 & ~0x2002, ioaddr + NetworkConfig);
	/* wait until they have really stopped */
	for (;;) {
		int csr5 = ioread32(ioaddr + IntrStatus);
		int t;

		t = (csr5 >> 17) & 0x07;
		if (t==0||t==1) {
			/* rx stopped */
			t = (csr5 >> 20) & 0x07;
			if (t==0||t==1)
				break;
		}

		limit--;
		if(!limit) {
			printk(KERN_INFO "%s: couldn't stop rxtx, IntrStatus %xh.\n",
			       dev->name, csr5);
			break;
		}
		udelay(1);
	}
	np->csr6 = new;
	/* and restart them with the new configuration */
	iowrite32(np->csr6, ioaddr + NetworkConfig);
	if (new & 0x200)
		np->mii_if.full_duplex = 1;
}

static void netdev_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	if (debug > 2)
		printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x "
		       "config %8.8x.\n",
		       dev->name, ioread32(ioaddr + IntrStatus),
		       ioread32(ioaddr + NetworkConfig));
	spin_lock_irq(&np->lock);
	update_csr6(dev, update_link(dev));
	spin_unlock_irq(&np->lock);
	np->timer.expires = jiffies + 10*HZ;
	add_timer(&np->timer);
}

static void init_rxtx_rings(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->rx_head_desc = &np->rx_ring[0];
	np->tx_ring = (struct w840_tx_desc*)&np->rx_ring[RX_RING_SIZE];

	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].length = np->rx_buf_sz;
		np->rx_ring[i].status = 0;
		np->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	np->rx_ring[i-1].length |= DescEndRing;

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;		/* Mark as being used by this device. */
		np->rx_addr[i] = pci_map_single(np->pci_dev,skb->tail,
					skb->len,PCI_DMA_FROMDEVICE);

		np->rx_ring[i].buffer1 = np->rx_addr[i];
		np->rx_ring[i].status = DescOwn;
	}

	np->cur_rx = 0;
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* Initialize the Tx descriptors */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].status = 0;
	}
	np->tx_full = 0;
	np->tx_q_bytes = np->dirty_tx = np->cur_tx = 0;

	iowrite32(np->ring_dma_addr, np->base_addr + RxRingPtr);
	iowrite32(np->ring_dma_addr+sizeof(struct w840_rx_desc)*RX_RING_SIZE,
		  np->base_addr + TxRingPtr);

}

static void free_rxtx_rings(struct netdev_private* np)
{
	int i;
	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		if (np->rx_skbuff[i]) {
			pci_unmap_single(np->pci_dev,
					 np->rx_addr[i],
					 np->rx_skbuff[i]->len,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skbuff[i]);
		}
		np->rx_skbuff[i] = NULL;
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		if (np->tx_skbuff[i]) {
			pci_unmap_single(np->pci_dev,
					 np->tx_addr[i],
					 np->tx_skbuff[i]->len,
					 PCI_DMA_TODEVICE);
			dev_kfree_skb(np->tx_skbuff[i]);
		}
		np->tx_skbuff[i] = NULL;
	}
}

static void init_registers(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int i;

	for (i = 0; i < 6; i++)
		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Initialize other registers. */
#ifdef __BIG_ENDIAN
	i = (1<<20);	/* Big-endian descriptors */
#else
	i = 0;
#endif
	i |= (0x04<<2);		/* skip length 4 u32 */
	i |= 0x02;		/* give Rx priority */

	/* Configure the PCI bus bursts and FIFO thresholds.
	   486: Set 8 longword cache alignment, 8 longword burst.
	   586: Set 16 longword cache alignment, no burst limit.
	   Cache alignment bits 15:14	     Burst length 13:8
		0000	<not allowed>		0000 align to cache	0800 8 longwords
		4000	8  longwords		0100 1 longword		1000 16 longwords
		8000	16 longwords		0200 2 longwords	2000 32 longwords
		C000	32 longwords		0400 4 longwords */

#if defined (__i386__) && !defined(MODULE)
	/* When not a module we can work around broken '486 PCI boards. */
	if (boot_cpu_data.x86 <= 4) {
		i |= 0x4800;
		printk(KERN_INFO "%s: This is a 386/486 PCI system, setting cache "
		       "alignment to 8 longwords.\n", dev->name);
	} else {
		i |= 0xE000;
	}
#elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
	i |= 0xE000;
#elif defined(__sparc__)
	i |= 0x4800;
#else
#warning Processor architecture undefined
	i |= 0x4800;
#endif
	iowrite32(i, ioaddr + PCIBusCfg);

	np->csr6 = 0;
	/* 128 byte Tx threshold;
	   Transmit on; Receive on; */
	update_csr6(dev, 0x00022002 | update_link(dev) | __set_rx_mode(dev));

	/* Clear and Enable interrupts by setting the interrupt mask. */
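	/* 0x1A0F5 = NormalIntr | AbnormalIntr | IntrPCIErr | RxNoBuf |
	   IntrRxDone | TxFIFOUnderflow | RxErrIntr | TxIdle | IntrTxDone
	   (see intr_status_bits above). */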
	iowrite32(0x1A0F5, ioaddr + IntrStatus);
	iowrite32(0x1A0F5, ioaddr + IntrEnable);

	iowrite32(0, ioaddr + RxStartDemand);
}

static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
	       " resetting...\n", dev->name, ioread32(ioaddr + IntrStatus));

	{
		int i;
		printk(KERN_DEBUG "  Rx ring %p: ", np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(" %8.8x", (unsigned int)np->rx_ring[i].status);
		printk("\n"KERN_DEBUG"  Tx ring %p: ", np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(" %8.8x", np->tx_ring[i].status);
		printk("\n");
	}
	printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d.\n",
	       np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
	printk(KERN_DEBUG "Tx Descriptor addr %xh.\n",ioread32(ioaddr+0x4C));

	disable_irq(dev->irq);
	spin_lock_irq(&np->lock);
	/*
	 * Under high load dirty_tx and the internal tx descriptor pointer
	 * come out of sync, thus perform a software reset and reinitialize
	 * everything.
	 */

	iowrite32(1, np->base_addr+PCIBusCfg);
	udelay(1);

	free_rxtx_rings(np);
	init_rxtx_rings(dev);
	init_registers(dev);
	spin_unlock_irq(&np->lock);
	enable_irq(dev->irq);

	netif_wake_queue(dev);
	dev->trans_start = jiffies;
	np->stats.tx_errors++;
	return;
}

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
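/* Note on layout: rx_ring and tx_ring live in one coherent DMA block,
   RX_RING_SIZE Rx descriptors followed immediately by TX_RING_SIZE Tx
   descriptors; init_rxtx_rings() points tx_ring at &rx_ring[RX_RING_SIZE]
   and programs RxRingPtr/TxRingPtr accordingly. */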
static int alloc_ringdesc(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	np->rx_ring = pci_alloc_consistent(np->pci_dev,
			sizeof(struct w840_rx_desc)*RX_RING_SIZE +
			sizeof(struct w840_tx_desc)*TX_RING_SIZE,
			&np->ring_dma_addr);
	if(!np->rx_ring)
		return -ENOMEM;
	init_rxtx_rings(dev);
	return 0;
}

static void free_ringdesc(struct netdev_private *np)
{
	pci_free_consistent(np->pci_dev,
			sizeof(struct w840_rx_desc)*RX_RING_SIZE +
			sizeof(struct w840_tx_desc)*TX_RING_SIZE,
			np->rx_ring, np->ring_dma_addr);

}

static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	unsigned entry;

	/* Caution: the write order is important here, set the field
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;

	np->tx_addr[entry] = pci_map_single(np->pci_dev,
				skb->data,skb->len, PCI_DMA_TODEVICE);
	np->tx_skbuff[entry] = skb;

	np->tx_ring[entry].buffer1 = np->tx_addr[entry];
	if (skb->len < TX_BUFLIMIT) {
		np->tx_ring[entry].length = DescWholePkt | skb->len;
	} else {
		int len = skb->len - TX_BUFLIMIT;

		np->tx_ring[entry].buffer2 = np->tx_addr[entry]+TX_BUFLIMIT;
		np->tx_ring[entry].length = DescWholePkt | (len << 11) | TX_BUFLIMIT;
	}
	if(entry == TX_RING_SIZE-1)
		np->tx_ring[entry].length |= DescEndRing;

	/* Now acquire the irq spinlock.
	 * The difficult race is the ordering between
	 * increasing np->cur_tx and setting DescOwn:
	 * - if np->cur_tx is increased first the interrupt
	 *   handler could consider the packet as transmitted
	 *   since DescOwn is cleared.
	 * - If DescOwn is set first the NIC could report the
	 *   packet as sent, but the interrupt handler would ignore it
	 *   since the np->cur_tx was not yet increased.
	 */
	spin_lock_irq(&np->lock);
	np->cur_tx++;

	wmb(); /* flush length, buffer1, buffer2 */
	np->tx_ring[entry].status = DescOwn;
	wmb(); /* flush status and kick the hardware */
	iowrite32(0, np->base_addr + TxStartDemand);
	np->tx_q_bytes += skb->len;
	/* Work around horrible bug in the chip by marking the queue as full
	   when we do not have FIFO room for a maximum sized packet. */
	if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN ||
		((np->drv_flags & HasBrokenTx) && np->tx_q_bytes > TX_BUG_FIFO_LIMIT)) {
		netif_stop_queue(dev);
		wmb();
		np->tx_full = 1;
	}
	spin_unlock_irq(&np->lock);

	dev->trans_start = jiffies;

	if (debug > 4) {
		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
		       dev->name, np->cur_tx, entry);
	}
	return 0;
}

static void netdev_tx_done(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
		int entry = np->dirty_tx % TX_RING_SIZE;
		int tx_status = np->tx_ring[entry].status;

		if (tx_status < 0)
			break;
		if (tx_status & 0x8000) {	/* There was an error, log it. */
#ifndef final_version
			if (debug > 1)
				printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
				       dev->name, tx_status);
#endif
			np->stats.tx_errors++;
			if (tx_status & 0x0104) np->stats.tx_aborted_errors++;
			if (tx_status & 0x0C80) np->stats.tx_carrier_errors++;
			if (tx_status & 0x0200) np->stats.tx_window_errors++;
			if (tx_status & 0x0002) np->stats.tx_fifo_errors++;
			if ((tx_status & 0x0080) && np->mii_if.full_duplex == 0)
				np->stats.tx_heartbeat_errors++;
		} else {
#ifndef final_version
			if (debug > 3)
				printk(KERN_DEBUG "%s: Transmit slot %d ok, Tx status %8.8x.\n",
				       dev->name, entry, tx_status);
#endif
			np->stats.tx_bytes += np->tx_skbuff[entry]->len;
			np->stats.collisions += (tx_status >> 3) & 15;
			np->stats.tx_packets++;
		}
		/* Free the original skb. */
		pci_unmap_single(np->pci_dev,np->tx_addr[entry],
					np->tx_skbuff[entry]->len,
					PCI_DMA_TODEVICE);
		np->tx_q_bytes -= np->tx_skbuff[entry]->len;
		dev_kfree_skb_irq(np->tx_skbuff[entry]);
		np->tx_skbuff[entry] = NULL;
	}
	if (np->tx_full &&
		np->cur_tx - np->dirty_tx < TX_QUEUE_LEN_RESTART &&
		np->tx_q_bytes < TX_BUG_FIFO_LIMIT) {
		/* The ring is no longer full, clear tbusy. */
		np->tx_full = 0;
		wmb();
		netif_wake_queue(dev);
	}
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int work_limit = max_interrupt_work;
	int handled = 0;

	if (!netif_device_present(dev))
		return IRQ_NONE;
	do {
		u32 intr_status = ioread32(ioaddr + IntrStatus);

		/* Acknowledge all of the current interrupt sources ASAP. */
		iowrite32(intr_status & 0x001ffff, ioaddr + IntrStatus);

		if (debug > 4)
			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
			       dev->name, intr_status);

		if ((intr_status & (NormalIntr|AbnormalIntr)) == 0)
			break;

		handled = 1;

		if (intr_status & (IntrRxDone | RxNoBuf))
			netdev_rx(dev);
		if (intr_status & RxNoBuf)
			iowrite32(0, ioaddr + RxStartDemand);

		if (intr_status & (TxIdle | IntrTxDone) &&
			np->cur_tx != np->dirty_tx) {
			spin_lock(&np->lock);
			netdev_tx_done(dev);
			spin_unlock(&np->lock);
		}

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (AbnormalIntr | TxFIFOUnderflow | IntrPCIErr |
				   TimerInt | IntrTxStopped))
			netdev_error(dev, intr_status);

		if (--work_limit < 0) {
			printk(KERN_WARNING "%s: Too much work at interrupt, "
			       "status=0x%4.4x.\n", dev->name, intr_status);
			/* Set the timer to re-enable the other interrupts after
			   10*82usec ticks. */
			spin_lock(&np->lock);
			if (netif_device_present(dev)) {
				iowrite32(AbnormalIntr | TimerInt, ioaddr + IntrEnable);
				iowrite32(10, ioaddr + GPTimer);
			}
			spin_unlock(&np->lock);
			break;
		}
	} while (1);

	if (debug > 3)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
		       dev->name, ioread32(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}

/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;

	if (debug > 4) {
		printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
		       entry, np->rx_ring[entry].status);
	}

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (--work_limit >= 0) {
		struct w840_rx_desc *desc = np->rx_head_desc;
		s32 status = desc->status;

		if (debug > 4)
			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
			       status);
		if (status < 0)
			break;
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
					       "multiple buffers, entry %#x status %4.4x!\n",
					       dev->name, np->cur_rx, status);
					np->stats.rx_length_errors++;
				}
			} else if (status & 0x8000) {
				/* There was a fatal error. */
				if (debug > 2)
					printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
					       dev->name, status);
				np->stats.rx_errors++; /* end of a packet.*/
				if (status & 0x0890) np->stats.rx_length_errors++;
				if (status & 0x004C) np->stats.rx_frame_errors++;
				if (status & 0x0002) np->stats.rx_crc_errors++;
			}
		} else {
			struct sk_buff *skb;
			/* Omit the four octet CRC from the length. */
			int pkt_len = ((status >> 16) & 0x7ff) - 4;

#ifndef final_version
			if (debug > 4)
				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
				       " status %x.\n", pkt_len, status);
#endif
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry],
							    np->rx_skbuff[entry]->len,
							    PCI_DMA_FROMDEVICE);
				eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(np->pci_dev,np->rx_addr[entry],
							       np->rx_skbuff[entry]->len,
							       PCI_DMA_FROMDEVICE);
			} else {
				pci_unmap_single(np->pci_dev,np->rx_addr[entry],
							np->rx_skbuff[entry]->len,
							PCI_DMA_FROMDEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
#ifndef final_version				/* Remove after testing. */
			/* You will want this info for the initial debug. */
			if (debug > 5)
				printk(KERN_DEBUG "  Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
				       "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x "
				       "%d.%d.%d.%d.\n",
				       skb->data[0], skb->data[1], skb->data[2], skb->data[3],
				       skb->data[4], skb->data[5], skb->data[6], skb->data[7],
				       skb->data[8], skb->data[9], skb->data[10],
				       skb->data[11], skb->data[12], skb->data[13],
				       skb->data[14], skb->data[15], skb->data[16],
				       skb->data[17]);
#endif
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->last_rx = jiffies;
			np->stats.rx_packets++;
			np->stats.rx_bytes += pkt_len;
		}
		entry = (++np->cur_rx) % RX_RING_SIZE;
		np->rx_head_desc = &np->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		struct sk_buff *skb;
		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(np->rx_buf_sz);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;			/* Better luck next round. */
			skb->dev = dev;			/* Mark as being used by this device. */
			np->rx_addr[entry] = pci_map_single(np->pci_dev,
							skb->tail,
							skb->len, PCI_DMA_FROMDEVICE);
			np->rx_ring[entry].buffer1 = np->rx_addr[entry];
		}
		wmb();
		np->rx_ring[entry].status = DescOwn;
	}

	return 0;
}

static void netdev_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	if (debug > 2)
		printk(KERN_DEBUG "%s: Abnormal event, %8.8x.\n",
		       dev->name, intr_status);
	if (intr_status == 0xffffffff)
		return;
	spin_lock(&np->lock);
	if (intr_status & TxFIFOUnderflow) {
		int new;
		/* Bump up the Tx threshold */
#if 0
		/* This causes lots of dropped packets,
		 * and under high load even tx_timeouts
		 */
		new = np->csr6 + 0x4000;
#else
		new = (np->csr6 >> 14)&0x7f;
		if (new < 64)
			new *= 2;
		else
			new = 127; /* load full packet before starting */
		new = (np->csr6 & ~(0x7F << 14)) | (new<<14);
#endif
		printk(KERN_DEBUG "%s: Tx underflow, new csr6 %8.8x.\n",
		       dev->name, new);
		update_csr6(dev, new);
	}
	if (intr_status & IntrRxDied) {		/* Missed a Rx frame. */
		np->stats.rx_errors++;
	}
	if (intr_status & TimerInt) {
		/* Re-enable other interrupts. */
		if (netif_device_present(dev))
			iowrite32(0x1A0F5, ioaddr + IntrEnable);
	}
	np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
	iowrite32(0, ioaddr + RxStartDemand);
	spin_unlock(&np->lock);
}

static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	/* The chip only needs to report frames it silently dropped. */
	spin_lock_irq(&np->lock);
	if (netif_running(dev) && netif_device_present(dev))
		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
	spin_unlock_irq(&np->lock);

	return &np->stats;
}

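/* The '840 hashes each multicast address into one of 64 filter bits: the
   top six bits of the Ethernet CRC, inverted, select bit (filterbit & 31)
   of MulticastFilter0 (for filterbit < 32) or MulticastFilter1. */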
static u32 __set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	u32 mc_filter[2];			/* Multicast hash filter */
	u32 rx_mode;

	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
		/* Unconditionally log net taps. */
		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAllPhys
			| AcceptMyPhys;
	} else if ((dev->mc_count > multicast_filter_limit)
		   ||  (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
	} else {
		struct dev_mc_list *mclist;
		int i;
		memset(mc_filter, 0, sizeof(mc_filter));
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
			 i++, mclist = mclist->next) {
			int filterbit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F;
			filterbit &= 0x3f;
			mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
		}
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
	}
	iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
	iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
	return rx_mode;
}

static void set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	u32 rx_mode = __set_rx_mode(dev);
	spin_lock_irq(&np->lock);
	update_csr6(dev, (np->csr6 & ~0x00F8) | rx_mode);
	spin_unlock_irq(&np->lock);
}

static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);

	strcpy (info->driver, DRV_NAME);
	strcpy (info->version, DRV_VERSION);
	strcpy (info->bus_info, pci_name(np->pci_dev));
}

static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	spin_lock_irq(&np->lock);
	rc = mii_ethtool_gset(&np->mii_if, cmd);
	spin_unlock_irq(&np->lock);

	return rc;
}

static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	spin_lock_irq(&np->lock);
	rc = mii_ethtool_sset(&np->mii_if, cmd);
	spin_unlock_irq(&np->lock);

	return rc;
}

static int netdev_nway_reset(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_nway_restart(&np->mii_if);
}

static u32 netdev_get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	return mii_link_ok(&np->mii_if);
}

static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}

static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}

static struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.set_settings		= netdev_set_settings,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.get_sg			= ethtool_op_get_sg,
	.get_tx_csum		= ethtool_op_get_tx_csum,
};

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct mii_ioctl_data *data = if_mii(rq);
	struct netdev_private *np = netdev_priv(dev);

	switch(cmd) {
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
		data->phy_id = np->phys[0] & 0x1f;
		/* Fall Through */

	case SIOCGMIIREG:		/* Read MII PHY register. */
		spin_lock_irq(&np->lock);
		data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
		spin_unlock_irq(&np->lock);
		return 0;

	case SIOCSMIIREG:		/* Write MII PHY register. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		spin_lock_irq(&np->lock);
		mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
		spin_unlock_irq(&np->lock);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	netif_stop_queue(dev);

	if (debug > 1) {
		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %8.8x "
		       "Config %8.8x.\n", dev->name, ioread32(ioaddr + IntrStatus),
		       ioread32(ioaddr + NetworkConfig));
		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
		       dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
	}

	/* Stop the chip's Tx and Rx processes. */
	spin_lock_irq(&np->lock);
	netif_device_detach(dev);
	update_csr6(dev, 0);
	iowrite32(0x0000, ioaddr + IntrEnable);
	spin_unlock_irq(&np->lock);

	free_irq(dev->irq, dev);
	wmb();
	netif_device_attach(dev);

	if (ioread32(ioaddr + NetworkConfig) != 0xffffffff)
		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;

#ifdef __i386__
	if (debug > 2) {
		int i;

		printk(KERN_DEBUG"  Tx ring at %8.8x:\n",
		       (int)np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x.\n",
			       i, np->tx_ring[i].length,
			       np->tx_ring[i].status, np->tx_ring[i].buffer1);
		printk("\n"KERN_DEBUG "  Rx ring %8.8x:\n",
		       (int)np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++) {
			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
			       i, np->rx_ring[i].length,
			       np->rx_ring[i].status, np->rx_ring[i].buffer1);
		}
	}
#endif /* __i386__ debugging only */

	del_timer_sync(&np->timer);

	free_rxtx_rings(np);
	free_ringdesc(np);

	return 0;
}

static void __devexit w840_remove1 (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct netdev_private *np = netdev_priv(dev);
		unregister_netdev(dev);
		pci_release_regions(pdev);
		pci_iounmap(pdev, np->base_addr);
		free_netdev(dev);
	}

	pci_set_drvdata(pdev, NULL);
}

#ifdef CONFIG_PM

/*
 * suspend/resume synchronization:
 * - open, close, do_ioctl:
 * 	rtnl_lock, & netif_device_detach after the rtnl_unlock.
 * - get_stats:
 * 	spin_lock_irq(np->lock), doesn't touch hw if not present
 * - hard_start_xmit:
 * 	netif_stop_queue + spin_unlock_wait(&dev->xmit_lock);
 * - tx_timeout:
 * 	netif_device_detach + spin_unlock_wait(&dev->xmit_lock);
 * - set_multicast_list
 * 	netif_device_detach + spin_unlock_wait(&dev->xmit_lock);
 * - interrupt handler
 * 	doesn't touch hw if not present, synchronize_irq waits for
 * 	running instances of the interrupt handler.
 *
 * Disabling hw requires clearing csr6 & IntrEnable.
 * update_csr6 and all functions that write IntrEnable check
 * netif_device_present before setting any bits.
 *
 * Detach must occur while holding spin_lock_irq(), otherwise
 * interrupts from a detached device would cause an irq storm.
 */
static int w840_suspend (struct pci_dev *pdev, u32 state)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	rtnl_lock();
	if (netif_running (dev)) {
		del_timer_sync(&np->timer);

		spin_lock_irq(&np->lock);
		netif_device_detach(dev);
		update_csr6(dev, 0);
		iowrite32(0, ioaddr + IntrEnable);
		netif_stop_queue(dev);
		spin_unlock_irq(&np->lock);

		spin_unlock_wait(&dev->xmit_lock);
		synchronize_irq(dev->irq);

		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;

		/* no more hardware accesses behind this line. */

		if (np->csr6) BUG();
		if (ioread32(ioaddr + IntrEnable)) BUG();

		/* pci_power_off(pdev, -1); */

		free_rxtx_rings(np);
	} else {
		netif_device_detach(dev);
	}
	rtnl_unlock();
	return 0;
}

static int w840_resume (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	struct netdev_private *np = netdev_priv(dev);

	rtnl_lock();
	if (netif_device_present(dev))
		goto out; /* device not suspended */
	if (netif_running(dev)) {
		pci_enable_device(pdev);
	/*	pci_power_on(pdev); */

		spin_lock_irq(&np->lock);
		iowrite32(1, np->base_addr+PCIBusCfg);
		ioread32(np->base_addr+PCIBusCfg);
		udelay(1);
		netif_device_attach(dev);
		init_rxtx_rings(dev);
		init_registers(dev);
		spin_unlock_irq(&np->lock);

		netif_wake_queue(dev);

		mod_timer(&np->timer, jiffies + 1*HZ);
	} else {
		netif_device_attach(dev);
	}
out:
	rtnl_unlock();
	return 0;
}
#endif

static struct pci_driver w840_driver = {
	.name		= DRV_NAME,
	.id_table	= w840_pci_tbl,
	.probe		= w840_probe1,
	.remove		= __devexit_p(w840_remove1),
#ifdef CONFIG_PM
	.suspend	= w840_suspend,
	.resume		= w840_resume,
#endif
};

static int __init w840_init(void)
{
	printk(version);
	return pci_module_init(&w840_driver);
}

static void __exit w840_exit(void)
{
	pci_unregister_driver(&w840_driver);
}

module_init(w840_init);
module_exit(w840_exit);