1 /* pcnet32.c: An AMD PCnet32 ethernet driver for linux. */
2 /*
3 * Copyright 1996-1999 Thomas Bogendoerfer
4 *
5 * Derived from the lance driver written 1993,1994,1995 by Donald Becker.
6 *
7 * Copyright 1993 United States Government as represented by the
8 * Director, National Security Agency.
9 *
10 * This software may be used and distributed according to the terms
11 * of the GNU General Public License, incorporated herein by reference.
12 *
13 * This driver is for PCnet32 and PCnetPCI based ethercards
14 */
15 /**************************************************************************
16 * 23 Oct, 2000.
17 * Fixed a few bugs related to running the controller in 32bit mode.
18 *
19 * Carsten Langgaard, carstenl@mips.com
20 * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
21 *
22 *************************************************************************/
23
24 #define DRV_NAME "pcnet32"
25 #define DRV_VERSION "1.32"
26 #define DRV_RELDATE "18.Mar.2006"
27 #define PFX DRV_NAME ": "
28
29 static const char *const version =
30 DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n";
31
32 #include <linux/module.h>
33 #include <linux/kernel.h>
34 #include <linux/string.h>
35 #include <linux/errno.h>
36 #include <linux/ioport.h>
37 #include <linux/slab.h>
38 #include <linux/interrupt.h>
39 #include <linux/pci.h>
40 #include <linux/delay.h>
41 #include <linux/init.h>
42 #include <linux/ethtool.h>
43 #include <linux/mii.h>
44 #include <linux/crc32.h>
45 #include <linux/netdevice.h>
46 #include <linux/etherdevice.h>
47 #include <linux/skbuff.h>
48 #include <linux/spinlock.h>
49 #include <linux/moduleparam.h>
50 #include <linux/bitops.h>
51
52 #include <asm/dma.h>
53 #include <asm/io.h>
54 #include <asm/uaccess.h>
55 #include <asm/irq.h>
56
57 /*
58 * PCI device identifiers for "new style" Linux PCI Device Drivers
59 */
60 static struct pci_device_id pcnet32_pci_tbl[] = {
61 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME), },
62 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE), },
63
64 /*
65 * Adapters that were sold with IBM's RS/6000 or pSeries hardware have
66 * the incorrect vendor id.
67 */
68 { PCI_DEVICE(PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE),
69 .class = (PCI_CLASS_NETWORK_ETHERNET << 8), .class_mask = 0xffff00, },
70
71 { } /* terminate list */
72 };
73
74 MODULE_DEVICE_TABLE(pci, pcnet32_pci_tbl);
75
76 static int cards_found;
77
78 /*
79 * VLB I/O addresses
80 */
81 static unsigned int pcnet32_portlist[] __initdata =
82 { 0x300, 0x320, 0x340, 0x360, 0 };
83
84 static int pcnet32_debug = 0;
85 static int tx_start = 1; /* Mapping -- 0:20, 1:64, 2:128, 3:~220 (depends on chip vers) */
86 static int pcnet32vlb; /* check for VLB cards ? */
87
88 static struct net_device *pcnet32_dev;
89
90 static int max_interrupt_work = 2;
91 static int rx_copybreak = 200;
92
93 #define PCNET32_PORT_AUI 0x00
94 #define PCNET32_PORT_10BT 0x01
95 #define PCNET32_PORT_GPSI 0x02
96 #define PCNET32_PORT_MII 0x03
97
98 #define PCNET32_PORT_PORTSEL 0x03
99 #define PCNET32_PORT_ASEL 0x04
100 #define PCNET32_PORT_100 0x40
101 #define PCNET32_PORT_FD 0x80
102
103 #define PCNET32_DMA_MASK 0xffffffff
104
105 #define PCNET32_WATCHDOG_TIMEOUT (jiffies + (2 * HZ))
106 #define PCNET32_BLINK_TIMEOUT (jiffies + (HZ/4))
107
108 /*
109 * table to translate option values from tulip
110 * to internal options
111 */
112 static const unsigned char options_mapping[] = {
113 PCNET32_PORT_ASEL, /* 0 Auto-select */
114 PCNET32_PORT_AUI, /* 1 BNC/AUI */
115 PCNET32_PORT_AUI, /* 2 AUI/BNC */
116 PCNET32_PORT_ASEL, /* 3 not supported */
117 PCNET32_PORT_10BT | PCNET32_PORT_FD, /* 4 10baseT-FD */
118 PCNET32_PORT_ASEL, /* 5 not supported */
119 PCNET32_PORT_ASEL, /* 6 not supported */
120 PCNET32_PORT_ASEL, /* 7 not supported */
121 PCNET32_PORT_ASEL, /* 8 not supported */
122 PCNET32_PORT_MII, /* 9 MII 10baseT */
123 PCNET32_PORT_MII | PCNET32_PORT_FD, /* 10 MII 10baseT-FD */
124 PCNET32_PORT_MII, /* 11 MII (autosel) */
125 PCNET32_PORT_10BT, /* 12 10BaseT */
126 PCNET32_PORT_MII | PCNET32_PORT_100, /* 13 MII 100BaseTx */
127 /* 14 MII 100BaseTx-FD */
128 PCNET32_PORT_MII | PCNET32_PORT_100 | PCNET32_PORT_FD,
129 PCNET32_PORT_ASEL /* 15 not supported */
130 };
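/*
 * Note: the "options" module parameter indexes this table directly, so
 * e.g. options=4 selects 10baseT full duplex (PCNET32_PORT_10BT |
 * PCNET32_PORT_FD) and options=0 leaves the chip in auto-select mode.
 */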
131
132 static const char pcnet32_gstrings_test[][ETH_GSTRING_LEN] = {
133 "Loopback test (offline)"
134 };
135
136 #define PCNET32_TEST_LEN (sizeof(pcnet32_gstrings_test) / ETH_GSTRING_LEN)
137
138 #define PCNET32_NUM_REGS 136
139
140 #define MAX_UNITS 8 /* More are supported, limit only on options */
141 static int options[MAX_UNITS];
142 static int full_duplex[MAX_UNITS];
143 static int homepna[MAX_UNITS];
144
145 /*
146 * Theory of Operation
147 *
148 * This driver uses the same software structure as the normal lance
149 * driver. So look for a verbose description in lance.c. The difference
150 * from the normal lance driver is the use of the 32bit mode of PCnet32
151 * and PCnetPCI chips. Because these chips are 32bit chips, there is no
152 * 16MB limitation and we don't need bounce buffers.
153 */
154
155 /*
156 * Set the number of Tx and Rx buffers, using Log_2(# buffers).
157 * Reasonable default values are 16 Tx buffers, and 32 Rx buffers.
158 * That translates to 4 (16 == 2^^4) and 5 (32 == 2^^5).
159 */
160 #ifndef PCNET32_LOG_TX_BUFFERS
161 #define PCNET32_LOG_TX_BUFFERS 4
162 #define PCNET32_LOG_RX_BUFFERS 5
163 #define PCNET32_LOG_MAX_TX_BUFFERS 9 /* 2^9 == 512 */
164 #define PCNET32_LOG_MAX_RX_BUFFERS 9
165 #endif
166
167 #define TX_RING_SIZE (1 << (PCNET32_LOG_TX_BUFFERS))
168 #define TX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_TX_BUFFERS))
169
170 #define RX_RING_SIZE (1 << (PCNET32_LOG_RX_BUFFERS))
171 #define RX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_RX_BUFFERS))
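/*
 * Worked example with the defaults above: PCNET32_LOG_TX_BUFFERS == 4 and
 * PCNET32_LOG_RX_BUFFERS == 5 give TX_RING_SIZE == 16 and RX_RING_SIZE == 32
 * descriptors, and the LOG_MAX values of 9 cap both rings at 512 entries.
 */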
172
173 #define PKT_BUF_SZ 1544
174
175 /* Offsets from base I/O address. */
176 #define PCNET32_WIO_RDP 0x10
177 #define PCNET32_WIO_RAP 0x12
178 #define PCNET32_WIO_RESET 0x14
179 #define PCNET32_WIO_BDP 0x16
180
181 #define PCNET32_DWIO_RDP 0x10
182 #define PCNET32_DWIO_RAP 0x14
183 #define PCNET32_DWIO_RESET 0x18
184 #define PCNET32_DWIO_BDP 0x1C
185
186 #define PCNET32_TOTAL_SIZE 0x20
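/*
 * Register access is indirect: the register number is written to the RAP
 * (register address port), then the data is read or written through the
 * RDP (CSRs) or BDP (BCRs).  For example, reading CSR0 in word-I/O mode is
 *
 *     outw(0, ioaddr + PCNET32_WIO_RAP);
 *     csr0 = inw(ioaddr + PCNET32_WIO_RDP);
 *
 * which is exactly what pcnet32_wio_read_csr() below does.
 */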
187
188 /* The PCNET32 Rx and Tx ring descriptors. */
189 struct pcnet32_rx_head {
190 u32 base;
191 s16 buf_length;
192 s16 status;
193 u32 msg_length;
194 u32 reserved;
195 };
196
197 struct pcnet32_tx_head {
198 u32 base;
199 s16 length;
200 s16 status;
201 u32 misc;
202 u32 reserved;
203 };
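/*
 * In both descriptor types the top bit of 'status' (0x8000) is the OWN bit:
 * the driver sets it (see the le16_to_cpu(0x8000) writes below) to hand a
 * descriptor to the chip and writes 0 to reclaim it.  The wmb()/rmb() calls
 * around those accesses keep the ownership change ordered with respect to
 * the rest of the descriptor.
 */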
204
205 /* The PCNET32 32-Bit initialization block, described in databook. */
206 struct pcnet32_init_block {
207 u16 mode;
208 u16 tlen_rlen;
209 u8 phys_addr[6];
210 u16 reserved;
211 u32 filter[2];
212 /* Receive and transmit ring base, along with extra bits. */
213 u32 rx_ring;
214 u32 tx_ring;
215 };
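/*
 * The chip locates this block through CSR1/CSR2: the low and high 16 bits
 * of its bus address are written there before initialization (see
 * pcnet32_probe1() and pcnet32_open() below), which is why the structure
 * must be the first member of pcnet32_private and live in DMA-consistent
 * memory.
 */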
216
217 /* PCnet32 access functions */
218 struct pcnet32_access {
219 u16 (*read_csr) (unsigned long, int);
220 void (*write_csr) (unsigned long, int, u16);
221 u16 (*read_bcr) (unsigned long, int);
222 void (*write_bcr) (unsigned long, int, u16);
223 u16 (*read_rap) (unsigned long);
224 void (*write_rap) (unsigned long, u16);
225 void (*reset) (unsigned long);
226 };
227
228 /*
229 * The first field of pcnet32_private is read by the ethernet device
230 * so the structure should be allocated using pci_alloc_consistent().
231 */
232 struct pcnet32_private {
233 struct pcnet32_init_block init_block;
234 /* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */
235 struct pcnet32_rx_head *rx_ring;
236 struct pcnet32_tx_head *tx_ring;
237 dma_addr_t dma_addr; /* DMA address of beginning of this
238 object, returned by pci_alloc_consistent */
239 struct pci_dev *pci_dev;
240 const char *name;
241 /* The saved address of a sent-in-place packet/buffer, for dev_kfree_skb(). */
242 struct sk_buff **tx_skbuff;
243 struct sk_buff **rx_skbuff;
244 dma_addr_t *tx_dma_addr;
245 dma_addr_t *rx_dma_addr;
246 struct pcnet32_access a;
247 spinlock_t lock; /* Guard lock */
248 unsigned int cur_rx, cur_tx; /* The next free ring entry */
249 unsigned int rx_ring_size; /* current rx ring size */
250 unsigned int tx_ring_size; /* current tx ring size */
251 unsigned int rx_mod_mask; /* rx ring modular mask */
252 unsigned int tx_mod_mask; /* tx ring modular mask */
253 unsigned short rx_len_bits;
254 unsigned short tx_len_bits;
255 dma_addr_t rx_ring_dma_addr;
256 dma_addr_t tx_ring_dma_addr;
257 unsigned int dirty_rx, /* ring entries to be freed. */
258 dirty_tx;
259
260 struct net_device_stats stats;
261 char tx_full;
262 char phycount; /* number of phys found */
263 int options;
264 unsigned int shared_irq:1, /* shared irq possible */
265 dxsuflo:1, /* disable transmit stop on uflo */
266 mii:1; /* mii port available */
267 struct net_device *next;
268 struct mii_if_info mii_if;
269 struct timer_list watchdog_timer;
270 struct timer_list blink_timer;
271 u32 msg_enable; /* debug message level */
272
273 /* each bit indicates an available PHY */
274 u32 phymask;
275 };
276
277 static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *);
278 static int pcnet32_probe1(unsigned long, int, struct pci_dev *);
279 static int pcnet32_open(struct net_device *);
280 static int pcnet32_init_ring(struct net_device *);
281 static int pcnet32_start_xmit(struct sk_buff *, struct net_device *);
282 static int pcnet32_rx(struct net_device *);
283 static void pcnet32_tx_timeout(struct net_device *dev);
284 static irqreturn_t pcnet32_interrupt(int, void *, struct pt_regs *);
285 static int pcnet32_close(struct net_device *);
286 static struct net_device_stats *pcnet32_get_stats(struct net_device *);
287 static void pcnet32_load_multicast(struct net_device *dev);
288 static void pcnet32_set_multicast_list(struct net_device *);
289 static int pcnet32_ioctl(struct net_device *, struct ifreq *, int);
290 static void pcnet32_watchdog(struct net_device *);
291 static int mdio_read(struct net_device *dev, int phy_id, int reg_num);
292 static void mdio_write(struct net_device *dev, int phy_id, int reg_num,
293 int val);
294 static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits);
295 static void pcnet32_ethtool_test(struct net_device *dev,
296 struct ethtool_test *eth_test, u64 * data);
297 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1);
298 static int pcnet32_phys_id(struct net_device *dev, u32 data);
299 static void pcnet32_led_blink_callback(struct net_device *dev);
300 static int pcnet32_get_regs_len(struct net_device *dev);
301 static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
302 void *ptr);
303 static void pcnet32_purge_tx_ring(struct net_device *dev);
304 static int pcnet32_alloc_ring(struct net_device *dev, char *name);
305 static void pcnet32_free_ring(struct net_device *dev);
306 static void pcnet32_check_media(struct net_device *dev, int verbose);
307
308 static u16 pcnet32_wio_read_csr(unsigned long addr, int index)
309 {
310 outw(index, addr + PCNET32_WIO_RAP);
311 return inw(addr + PCNET32_WIO_RDP);
312 }
313
314 static void pcnet32_wio_write_csr(unsigned long addr, int index, u16 val)
315 {
316 outw(index, addr + PCNET32_WIO_RAP);
317 outw(val, addr + PCNET32_WIO_RDP);
318 }
319
320 static u16 pcnet32_wio_read_bcr(unsigned long addr, int index)
321 {
322 outw(index, addr + PCNET32_WIO_RAP);
323 return inw(addr + PCNET32_WIO_BDP);
324 }
325
326 static void pcnet32_wio_write_bcr(unsigned long addr, int index, u16 val)
327 {
328 outw(index, addr + PCNET32_WIO_RAP);
329 outw(val, addr + PCNET32_WIO_BDP);
330 }
331
332 static u16 pcnet32_wio_read_rap(unsigned long addr)
333 {
334 return inw(addr + PCNET32_WIO_RAP);
335 }
336
337 static void pcnet32_wio_write_rap(unsigned long addr, u16 val)
338 {
339 outw(val, addr + PCNET32_WIO_RAP);
340 }
341
342 static void pcnet32_wio_reset(unsigned long addr)
343 {
344 inw(addr + PCNET32_WIO_RESET);
345 }
346
347 static int pcnet32_wio_check(unsigned long addr)
348 {
349 outw(88, addr + PCNET32_WIO_RAP);
350 return (inw(addr + PCNET32_WIO_RAP) == 88);
351 }
352
353 static struct pcnet32_access pcnet32_wio = {
354 .read_csr = pcnet32_wio_read_csr,
355 .write_csr = pcnet32_wio_write_csr,
356 .read_bcr = pcnet32_wio_read_bcr,
357 .write_bcr = pcnet32_wio_write_bcr,
358 .read_rap = pcnet32_wio_read_rap,
359 .write_rap = pcnet32_wio_write_rap,
360 .reset = pcnet32_wio_reset
361 };
362
363 static u16 pcnet32_dwio_read_csr(unsigned long addr, int index)
364 {
365 outl(index, addr + PCNET32_DWIO_RAP);
366 return (inl(addr + PCNET32_DWIO_RDP) & 0xffff);
367 }
368
369 static void pcnet32_dwio_write_csr(unsigned long addr, int index, u16 val)
370 {
371 outl(index, addr + PCNET32_DWIO_RAP);
372 outl(val, addr + PCNET32_DWIO_RDP);
373 }
374
375 static u16 pcnet32_dwio_read_bcr(unsigned long addr, int index)
376 {
377 outl(index, addr + PCNET32_DWIO_RAP);
378 return (inl(addr + PCNET32_DWIO_BDP) & 0xffff);
379 }
380
381 static void pcnet32_dwio_write_bcr(unsigned long addr, int index, u16 val)
382 {
383 outl(index, addr + PCNET32_DWIO_RAP);
384 outl(val, addr + PCNET32_DWIO_BDP);
385 }
386
387 static u16 pcnet32_dwio_read_rap(unsigned long addr)
388 {
389 return (inl(addr + PCNET32_DWIO_RAP) & 0xffff);
390 }
391
392 static void pcnet32_dwio_write_rap(unsigned long addr, u16 val)
393 {
394 outl(val, addr + PCNET32_DWIO_RAP);
395 }
396
397 static void pcnet32_dwio_reset(unsigned long addr)
398 {
399 inl(addr + PCNET32_DWIO_RESET);
400 }
401
402 static int pcnet32_dwio_check(unsigned long addr)
403 {
404 outl(88, addr + PCNET32_DWIO_RAP);
405 return ((inl(addr + PCNET32_DWIO_RAP) & 0xffff) == 88);
406 }
407
408 static struct pcnet32_access pcnet32_dwio = {
409 .read_csr = pcnet32_dwio_read_csr,
410 .write_csr = pcnet32_dwio_write_csr,
411 .read_bcr = pcnet32_dwio_read_bcr,
412 .write_bcr = pcnet32_dwio_write_bcr,
413 .read_rap = pcnet32_dwio_read_rap,
414 .write_rap = pcnet32_dwio_write_rap,
415 .reset = pcnet32_dwio_reset
416 };
417
418 #ifdef CONFIG_NET_POLL_CONTROLLER
419 static void pcnet32_poll_controller(struct net_device *dev)
420 {
421 disable_irq(dev->irq);
422 pcnet32_interrupt(0, dev, NULL);
423 enable_irq(dev->irq);
424 }
425 #endif
426
427 static int pcnet32_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
428 {
429 struct pcnet32_private *lp = dev->priv;
430 unsigned long flags;
431 int r = -EOPNOTSUPP;
432
433 if (lp->mii) {
434 spin_lock_irqsave(&lp->lock, flags);
435 mii_ethtool_gset(&lp->mii_if, cmd);
436 spin_unlock_irqrestore(&lp->lock, flags);
437 r = 0;
438 }
439 return r;
440 }
441
442 static int pcnet32_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
443 {
444 struct pcnet32_private *lp = dev->priv;
445 unsigned long flags;
446 int r = -EOPNOTSUPP;
447
448 if (lp->mii) {
449 spin_lock_irqsave(&lp->lock, flags);
450 r = mii_ethtool_sset(&lp->mii_if, cmd);
451 spin_unlock_irqrestore(&lp->lock, flags);
452 }
453 return r;
454 }
455
456 static void pcnet32_get_drvinfo(struct net_device *dev,
457 struct ethtool_drvinfo *info)
458 {
459 struct pcnet32_private *lp = dev->priv;
460
461 strcpy(info->driver, DRV_NAME);
462 strcpy(info->version, DRV_VERSION);
463 if (lp->pci_dev)
464 strcpy(info->bus_info, pci_name(lp->pci_dev));
465 else
466 sprintf(info->bus_info, "VLB 0x%lx", dev->base_addr);
467 }
468
469 static u32 pcnet32_get_link(struct net_device *dev)
470 {
471 struct pcnet32_private *lp = dev->priv;
472 unsigned long flags;
473 int r;
474
475 spin_lock_irqsave(&lp->lock, flags);
476 if (lp->mii) {
477 r = mii_link_ok(&lp->mii_if);
478 } else {
479 ulong ioaddr = dev->base_addr; /* card base I/O address */
480 r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
481 }
482 spin_unlock_irqrestore(&lp->lock, flags);
483
484 return r;
485 }
486
487 static u32 pcnet32_get_msglevel(struct net_device *dev)
488 {
489 struct pcnet32_private *lp = dev->priv;
490 return lp->msg_enable;
491 }
492
493 static void pcnet32_set_msglevel(struct net_device *dev, u32 value)
494 {
495 struct pcnet32_private *lp = dev->priv;
496 lp->msg_enable = value;
497 }
498
499 static int pcnet32_nway_reset(struct net_device *dev)
500 {
501 struct pcnet32_private *lp = dev->priv;
502 unsigned long flags;
503 int r = -EOPNOTSUPP;
504
505 if (lp->mii) {
506 spin_lock_irqsave(&lp->lock, flags);
507 r = mii_nway_restart(&lp->mii_if);
508 spin_unlock_irqrestore(&lp->lock, flags);
509 }
510 return r;
511 }
512
513 static void pcnet32_get_ringparam(struct net_device *dev,
514 struct ethtool_ringparam *ering)
515 {
516 struct pcnet32_private *lp = dev->priv;
517
518 ering->tx_max_pending = TX_MAX_RING_SIZE - 1;
519 ering->tx_pending = lp->tx_ring_size - 1;
520 ering->rx_max_pending = RX_MAX_RING_SIZE - 1;
521 ering->rx_pending = lp->rx_ring_size - 1;
522 }
523
524 static int pcnet32_set_ringparam(struct net_device *dev,
525 struct ethtool_ringparam *ering)
526 {
527 struct pcnet32_private *lp = dev->priv;
528 unsigned long flags;
529 int i;
530
531 if (ering->rx_mini_pending || ering->rx_jumbo_pending)
532 return -EINVAL;
533
534 if (netif_running(dev))
535 pcnet32_close(dev);
536
537 spin_lock_irqsave(&lp->lock, flags);
538 pcnet32_free_ring(dev);
539 lp->tx_ring_size =
540 min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
541 lp->rx_ring_size =
542 min(ering->rx_pending, (unsigned int)RX_MAX_RING_SIZE);
543
544 /* set the minimum ring size to 4, to allow the loopback test to work
545 * unchanged.
546 */
547 for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) {
548 if (lp->tx_ring_size <= (1 << i))
549 break;
550 }
551 lp->tx_ring_size = (1 << i);
552 lp->tx_mod_mask = lp->tx_ring_size - 1;
553 lp->tx_len_bits = (i << 12);
554
555 for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) {
556 if (lp->rx_ring_size <= (1 << i))
557 break;
558 }
559 lp->rx_ring_size = (1 << i);
560 lp->rx_mod_mask = lp->rx_ring_size - 1;
561 lp->rx_len_bits = (i << 4);
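	/*
	 * Example: a request for 100 rx descriptors is rounded up by the
	 * loop above to the next power of two, 128 (i == 7), so
	 * rx_ring_size becomes 128, rx_mod_mask 127 and rx_len_bits 0x70.
	 */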
562
563 if (pcnet32_alloc_ring(dev, dev->name)) {
564 pcnet32_free_ring(dev);
565 spin_unlock_irqrestore(&lp->lock, flags);
566 return -ENOMEM;
567 }
568
569 spin_unlock_irqrestore(&lp->lock, flags);
570
571 if (pcnet32_debug & NETIF_MSG_DRV)
572 printk(KERN_INFO PFX
573 "%s: Ring Param Settings: RX: %d, TX: %d\n", dev->name,
574 lp->rx_ring_size, lp->tx_ring_size);
575
576 if (netif_running(dev))
577 pcnet32_open(dev);
578
579 return 0;
580 }
581
582 static void pcnet32_get_strings(struct net_device *dev, u32 stringset,
583 u8 * data)
584 {
585 memcpy(data, pcnet32_gstrings_test, sizeof(pcnet32_gstrings_test));
586 }
587
588 static int pcnet32_self_test_count(struct net_device *dev)
589 {
590 return PCNET32_TEST_LEN;
591 }
592
593 static void pcnet32_ethtool_test(struct net_device *dev,
594 struct ethtool_test *test, u64 * data)
595 {
596 struct pcnet32_private *lp = dev->priv;
597 int rc;
598
599 if (test->flags == ETH_TEST_FL_OFFLINE) {
600 rc = pcnet32_loopback_test(dev, data);
601 if (rc) {
602 if (netif_msg_hw(lp))
603 printk(KERN_DEBUG "%s: Loopback test failed.\n",
604 dev->name);
605 test->flags |= ETH_TEST_FL_FAILED;
606 } else if (netif_msg_hw(lp))
607 printk(KERN_DEBUG "%s: Loopback test passed.\n",
608 dev->name);
609 } else if (netif_msg_hw(lp))
610 printk(KERN_DEBUG
611 "%s: No tests to run (specify 'Offline' on ethtool).",
612 dev->name);
613 } /* end pcnet32_ethtool_test */
614
615 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
616 {
617 struct pcnet32_private *lp = dev->priv;
618 struct pcnet32_access *a = &lp->a; /* access to registers */
619 ulong ioaddr = dev->base_addr; /* card base I/O address */
620 struct sk_buff *skb; /* sk buff */
621 int x, i; /* counters */
622 int numbuffs = 4; /* number of TX/RX buffers and descs */
623 u16 status = 0x8300; /* TX ring status */
624 u16 teststatus; /* test of ring status */
625 int rc; /* return code */
626 int size; /* size of packets */
627 unsigned char *packet; /* source packet data */
628 static const int data_len = 60; /* length of source packets */
629 unsigned long flags;
630 unsigned long ticks;
631
632 *data1 = 1; /* status of test, default to fail */
633 rc = 1; /* default to fail */
634
635 if (netif_running(dev))
636 pcnet32_close(dev);
637
638 spin_lock_irqsave(&lp->lock, flags);
639
640 /* Reset the PCNET32 */
641 lp->a.reset(ioaddr);
642
643 /* switch pcnet32 to 32bit mode */
644 lp->a.write_bcr(ioaddr, 20, 2);
645
646 lp->init_block.mode =
647 le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7);
648 lp->init_block.filter[0] = 0;
649 lp->init_block.filter[1] = 0;
650
651 /* purge & init rings but don't actually restart */
652 pcnet32_restart(dev, 0x0000);
653
654 lp->a.write_csr(ioaddr, 0, 0x0004); /* Set STOP bit */
655
656 /* Initialize Transmit buffers. */
657 size = data_len + 15;
658 for (x = 0; x < numbuffs; x++) {
659 if (!(skb = dev_alloc_skb(size))) {
660 if (netif_msg_hw(lp))
661 printk(KERN_DEBUG
662 "%s: Cannot allocate skb at line: %d!\n",
663 dev->name, __LINE__);
664 goto clean_up;
665 } else {
666 packet = skb->data;
667 skb_put(skb, size); /* create space for data */
668 lp->tx_skbuff[x] = skb;
669 lp->tx_ring[x].length = le16_to_cpu(-skb->len);
670 lp->tx_ring[x].misc = 0;
671
672 /* put DA and SA into the skb */
673 for (i = 0; i < 6; i++)
674 *packet++ = dev->dev_addr[i];
675 for (i = 0; i < 6; i++)
676 *packet++ = dev->dev_addr[i];
677 /* type */
678 *packet++ = 0x08;
679 *packet++ = 0x06;
680 /* packet number */
681 *packet++ = x;
682 /* fill packet with data */
683 for (i = 0; i < data_len; i++)
684 *packet++ = i;
685
686 lp->tx_dma_addr[x] =
687 pci_map_single(lp->pci_dev, skb->data, skb->len,
688 PCI_DMA_TODEVICE);
689 lp->tx_ring[x].base =
690 (u32) le32_to_cpu(lp->tx_dma_addr[x]);
691 wmb(); /* Make sure owner changes after all others are visible */
692 lp->tx_ring[x].status = le16_to_cpu(status);
693 }
694 }
695
696 x = a->read_bcr(ioaddr, 32); /* set internal loopback in BCR32 */
697 x = x | 0x0002;
698 a->write_bcr(ioaddr, 32, x);
699
700 lp->a.write_csr(ioaddr, 15, 0x0044); /* set int loopback in CSR15 */
701
702 teststatus = le16_to_cpu(0x8000);
703 lp->a.write_csr(ioaddr, 0, 0x0002); /* Set STRT bit */
704
705 /* Check status of descriptors */
706 for (x = 0; x < numbuffs; x++) {
707 ticks = 0;
708 rmb();
709 while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) {
710 spin_unlock_irqrestore(&lp->lock, flags);
711 mdelay(1);
712 spin_lock_irqsave(&lp->lock, flags);
713 rmb();
714 ticks++;
715 }
716 if (ticks == 200) {
717 if (netif_msg_hw(lp))
718 printk("%s: Desc %d failed to reset!\n",
719 dev->name, x);
720 break;
721 }
722 }
723
724 lp->a.write_csr(ioaddr, 0, 0x0004); /* Set STOP bit */
725 wmb();
726 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
727 printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
728
729 for (x = 0; x < numbuffs; x++) {
730 printk(KERN_DEBUG "%s: Packet %d:\n", dev->name, x);
731 skb = lp->rx_skbuff[x];
732 for (i = 0; i < size; i++) {
733 printk("%02x ", *(skb->data + i));
734 }
735 printk("\n");
736 }
737 }
738
739 x = 0;
740 rc = 0;
741 while (x < numbuffs && !rc) {
742 skb = lp->rx_skbuff[x];
743 packet = lp->tx_skbuff[x]->data;
744 for (i = 0; i < size; i++) {
745 if (*(skb->data + i) != packet[i]) {
746 if (netif_msg_hw(lp))
747 printk(KERN_DEBUG
748 "%s: Error in compare! %2x - %02x %02x\n",
749 dev->name, i, *(skb->data + i),
750 packet[i]);
751 rc = 1;
752 break;
753 }
754 }
755 x++;
756 }
757 if (!rc) {
758 *data1 = 0;
759 }
760
761 clean_up:
762 pcnet32_purge_tx_ring(dev);
763 x = a->read_csr(ioaddr, 15) & 0xFFFF;
764 a->write_csr(ioaddr, 15, (x & ~0x0044)); /* reset bits 6 and 2 */
765
766 x = a->read_bcr(ioaddr, 32); /* reset internal loopback */
767 x = x & ~0x0002;
768 a->write_bcr(ioaddr, 32, x);
769
770 spin_unlock_irqrestore(&lp->lock, flags);
771
772 if (netif_running(dev)) {
773 pcnet32_open(dev);
774 } else {
775 lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
776 }
777
778 return (rc);
779 } /* end pcnet32_loopback_test */
780
781 static void pcnet32_led_blink_callback(struct net_device *dev)
782 {
783 struct pcnet32_private *lp = dev->priv;
784 struct pcnet32_access *a = &lp->a;
785 ulong ioaddr = dev->base_addr;
786 unsigned long flags;
787 int i;
788
789 spin_lock_irqsave(&lp->lock, flags);
790 for (i = 4; i < 8; i++) {
791 a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000);
792 }
793 spin_unlock_irqrestore(&lp->lock, flags);
794
795 mod_timer(&lp->blink_timer, PCNET32_BLINK_TIMEOUT);
796 }
797
798 static int pcnet32_phys_id(struct net_device *dev, u32 data)
799 {
800 struct pcnet32_private *lp = dev->priv;
801 struct pcnet32_access *a = &lp->a;
802 ulong ioaddr = dev->base_addr;
803 unsigned long flags;
804 int i, regs[4];
805
806 if (!lp->blink_timer.function) {
807 init_timer(&lp->blink_timer);
808 lp->blink_timer.function = (void *)pcnet32_led_blink_callback;
809 lp->blink_timer.data = (unsigned long)dev;
810 }
811
812 /* Save the current value of the bcrs */
813 spin_lock_irqsave(&lp->lock, flags);
814 for (i = 4; i < 8; i++) {
815 regs[i - 4] = a->read_bcr(ioaddr, i);
816 }
817 spin_unlock_irqrestore(&lp->lock, flags);
818
819 mod_timer(&lp->blink_timer, jiffies);
820 set_current_state(TASK_INTERRUPTIBLE);
821
822 if ((!data) || (data > (u32) (MAX_SCHEDULE_TIMEOUT / HZ)))
823 data = (u32) (MAX_SCHEDULE_TIMEOUT / HZ);
824
825 msleep_interruptible(data * 1000);
826 del_timer_sync(&lp->blink_timer);
827
828 /* Restore the original value of the bcrs */
829 spin_lock_irqsave(&lp->lock, flags);
830 for (i = 4; i < 8; i++) {
831 a->write_bcr(ioaddr, i, regs[i - 4]);
832 }
833 spin_unlock_irqrestore(&lp->lock, flags);
834
835 return 0;
836 }
837
838 #define PCNET32_REGS_PER_PHY 32
839 #define PCNET32_MAX_PHYS 32
840 static int pcnet32_get_regs_len(struct net_device *dev)
841 {
842 struct pcnet32_private *lp = dev->priv;
843 int j = lp->phycount * PCNET32_REGS_PER_PHY;
844
845 return ((PCNET32_NUM_REGS + j) * sizeof(u16));
846 }
847
848 static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
849 void *ptr)
850 {
851 int i, csr0;
852 u16 *buff = ptr;
853 struct pcnet32_private *lp = dev->priv;
854 struct pcnet32_access *a = &lp->a;
855 ulong ioaddr = dev->base_addr;
856 int ticks;
857 unsigned long flags;
858
859 spin_lock_irqsave(&lp->lock, flags);
860
861 csr0 = a->read_csr(ioaddr, 0);
862 if (!(csr0 & 0x0004)) { /* If not stopped */
863 /* set SUSPEND (SPND) - CSR5 bit 0 */
864 a->write_csr(ioaddr, 5, 0x0001);
865
866 /* poll waiting for bit to be set */
867 ticks = 0;
868 while (!(a->read_csr(ioaddr, 5) & 0x0001)) {
869 spin_unlock_irqrestore(&lp->lock, flags);
870 mdelay(1);
871 spin_lock_irqsave(&lp->lock, flags);
872 ticks++;
873 if (ticks > 200) {
874 if (netif_msg_hw(lp))
875 printk(KERN_DEBUG
876 "%s: Error getting into suspend!\n",
877 dev->name);
878 break;
879 }
880 }
881 }
882
883 /* read address PROM */
884 for (i = 0; i < 16; i += 2)
885 *buff++ = inw(ioaddr + i);
886
887 /* read control and status registers */
888 for (i = 0; i < 90; i++) {
889 *buff++ = a->read_csr(ioaddr, i);
890 }
891
892 *buff++ = a->read_csr(ioaddr, 112);
893 *buff++ = a->read_csr(ioaddr, 114);
894
895 /* read bus configuration registers */
896 for (i = 0; i < 30; i++) {
897 *buff++ = a->read_bcr(ioaddr, i);
898 }
899 *buff++ = 0; /* skip bcr30 so as not to hang 79C976 */
900 for (i = 31; i < 36; i++) {
901 *buff++ = a->read_bcr(ioaddr, i);
902 }
903
904 /* read mii phy registers */
905 if (lp->mii) {
906 int j;
907 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
908 if (lp->phymask & (1 << j)) {
909 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
910 lp->a.write_bcr(ioaddr, 33,
911 (j << 5) | i);
912 *buff++ = lp->a.read_bcr(ioaddr, 34);
913 }
914 }
915 }
916 }
917
918 if (!(csr0 & 0x0004)) { /* If not stopped */
919 /* clear SUSPEND (SPND) - CSR5 bit 0 */
920 a->write_csr(ioaddr, 5, 0x0000);
921 }
922
923 spin_unlock_irqrestore(&lp->lock, flags);
924 }
925
926 static struct ethtool_ops pcnet32_ethtool_ops = {
927 .get_settings = pcnet32_get_settings,
928 .set_settings = pcnet32_set_settings,
929 .get_drvinfo = pcnet32_get_drvinfo,
930 .get_msglevel = pcnet32_get_msglevel,
931 .set_msglevel = pcnet32_set_msglevel,
932 .nway_reset = pcnet32_nway_reset,
933 .get_link = pcnet32_get_link,
934 .get_ringparam = pcnet32_get_ringparam,
935 .set_ringparam = pcnet32_set_ringparam,
936 .get_tx_csum = ethtool_op_get_tx_csum,
937 .get_sg = ethtool_op_get_sg,
938 .get_tso = ethtool_op_get_tso,
939 .get_strings = pcnet32_get_strings,
940 .self_test_count = pcnet32_self_test_count,
941 .self_test = pcnet32_ethtool_test,
942 .phys_id = pcnet32_phys_id,
943 .get_regs_len = pcnet32_get_regs_len,
944 .get_regs = pcnet32_get_regs,
945 .get_perm_addr = ethtool_op_get_perm_addr,
946 };
947
948 /* only probes for non-PCI devices, the rest are handled by
949 * pci_register_driver via pcnet32_probe_pci */
950
951 static void __devinit pcnet32_probe_vlbus(unsigned int *pcnet32_portlist)
952 {
953 unsigned int *port, ioaddr;
954
955 /* search for PCnet32 VLB cards at known addresses */
956 for (port = pcnet32_portlist; (ioaddr = *port); port++) {
957 if (request_region
958 (ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_vlbus")) {
959 /* check if there is really a pcnet chip on that ioaddr */
960 if ((inb(ioaddr + 14) == 0x57)
961 && (inb(ioaddr + 15) == 0x57)) {
962 pcnet32_probe1(ioaddr, 0, NULL);
963 } else {
964 release_region(ioaddr, PCNET32_TOTAL_SIZE);
965 }
966 }
967 }
968 }
969
970 static int __devinit
971 pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
972 {
973 unsigned long ioaddr;
974 int err;
975
976 err = pci_enable_device(pdev);
977 if (err < 0) {
978 if (pcnet32_debug & NETIF_MSG_PROBE)
979 printk(KERN_ERR PFX
980 "failed to enable device -- err=%d\n", err);
981 return err;
982 }
983 pci_set_master(pdev);
984
985 ioaddr = pci_resource_start(pdev, 0);
986 if (!ioaddr) {
987 if (pcnet32_debug & NETIF_MSG_PROBE)
988 printk(KERN_ERR PFX
989 "card has no PCI IO resources, aborting\n");
990 return -ENODEV;
991 }
992
993 if (!pci_dma_supported(pdev, PCNET32_DMA_MASK)) {
994 if (pcnet32_debug & NETIF_MSG_PROBE)
995 printk(KERN_ERR PFX
996 "architecture does not support 32bit PCI busmaster DMA\n");
997 return -ENODEV;
998 }
999 if (request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci") ==
1000 NULL) {
1001 if (pcnet32_debug & NETIF_MSG_PROBE)
1002 printk(KERN_ERR PFX
1003 "io address range already allocated\n");
1004 return -EBUSY;
1005 }
1006
1007 err = pcnet32_probe1(ioaddr, 1, pdev);
1008 if (err < 0) {
1009 pci_disable_device(pdev);
1010 }
1011 return err;
1012 }
1013
1014 /* pcnet32_probe1
1015 * Called from both pcnet32_probe_vlbus and pcnet32_probe_pci.
1016 * pdev will be NULL when called from pcnet32_probe_vlbus.
1017 */
1018 static int __devinit
1019 pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1020 {
1021 struct pcnet32_private *lp;
1022 dma_addr_t lp_dma_addr;
1023 int i, media;
1024 int fdx, mii, fset, dxsuflo;
1025 int chip_version;
1026 char *chipname;
1027 struct net_device *dev;
1028 struct pcnet32_access *a = NULL;
1029 u8 promaddr[6];
1030 int ret = -ENODEV;
1031
1032 /* reset the chip */
1033 pcnet32_wio_reset(ioaddr);
1034
1035 /* NOTE: 16-bit check is first, otherwise some older PCnet chips fail */
1036 if (pcnet32_wio_read_csr(ioaddr, 0) == 4 && pcnet32_wio_check(ioaddr)) {
1037 a = &pcnet32_wio;
1038 } else {
1039 pcnet32_dwio_reset(ioaddr);
1040 if (pcnet32_dwio_read_csr(ioaddr, 0) == 4
1041 && pcnet32_dwio_check(ioaddr)) {
1042 a = &pcnet32_dwio;
1043 } else
1044 goto err_release_region;
1045 }
1046
1047 chip_version =
1048 a->read_csr(ioaddr, 88) | (a->read_csr(ioaddr, 89) << 16);
1049 if ((pcnet32_debug & NETIF_MSG_PROBE) && (pcnet32_debug & NETIF_MSG_HW))
1050 printk(KERN_INFO " PCnet chip version is %#x.\n",
1051 chip_version);
1052 if ((chip_version & 0xfff) != 0x003) {
1053 if (pcnet32_debug & NETIF_MSG_PROBE)
1054 printk(KERN_INFO PFX "Unsupported chip version.\n");
1055 goto err_release_region;
1056 }
1057
1058 /* initialize variables */
1059 fdx = mii = fset = dxsuflo = 0;
1060 chip_version = (chip_version >> 12) & 0xffff;
1061
1062 switch (chip_version) {
1063 case 0x2420:
1064 chipname = "PCnet/PCI 79C970"; /* PCI */
1065 break;
1066 case 0x2430:
1067 if (shared)
1068 chipname = "PCnet/PCI 79C970"; /* 970 gives the wrong chip id back */
1069 else
1070 chipname = "PCnet/32 79C965"; /* 486/VL bus */
1071 break;
1072 case 0x2621:
1073 chipname = "PCnet/PCI II 79C970A"; /* PCI */
1074 fdx = 1;
1075 break;
1076 case 0x2623:
1077 chipname = "PCnet/FAST 79C971"; /* PCI */
1078 fdx = 1;
1079 mii = 1;
1080 fset = 1;
1081 break;
1082 case 0x2624:
1083 chipname = "PCnet/FAST+ 79C972"; /* PCI */
1084 fdx = 1;
1085 mii = 1;
1086 fset = 1;
1087 break;
1088 case 0x2625:
1089 chipname = "PCnet/FAST III 79C973"; /* PCI */
1090 fdx = 1;
1091 mii = 1;
1092 break;
1093 case 0x2626:
1094 chipname = "PCnet/Home 79C978"; /* PCI */
1095 fdx = 1;
1096 /*
1097 * This is based on specs published at www.amd.com. This section
1098 * assumes that a card with a 79C978 wants to go into standard
1099 * ethernet mode. The 79C978 can also go into 1Mb HomePNA mode,
1100 * and the module option homepna=1 can select this instead.
1101 */
1102 media = a->read_bcr(ioaddr, 49);
1103 media &= ~3; /* default to 10Mb ethernet */
1104 if (cards_found < MAX_UNITS && homepna[cards_found])
1105 media |= 1; /* switch to home wiring mode */
1106 if (pcnet32_debug & NETIF_MSG_PROBE)
1107 printk(KERN_DEBUG PFX "media set to %sMbit mode.\n",
1108 (media & 1) ? "1" : "10");
1109 a->write_bcr(ioaddr, 49, media);
1110 break;
1111 case 0x2627:
1112 chipname = "PCnet/FAST III 79C975"; /* PCI */
1113 fdx = 1;
1114 mii = 1;
1115 break;
1116 case 0x2628:
1117 chipname = "PCnet/PRO 79C976";
1118 fdx = 1;
1119 mii = 1;
1120 break;
1121 default:
1122 if (pcnet32_debug & NETIF_MSG_PROBE)
1123 printk(KERN_INFO PFX
1124 "PCnet version %#x, no PCnet32 chip.\n",
1125 chip_version);
1126 goto err_release_region;
1127 }
1128
1129 /*
1130 * On selected chips turn on the BCR18:NOUFLO bit. This stops transmit
1131 * starting until the packet is loaded. Strike one for reliability, lose
1132 * one for latency - although on PCI this isn't a big loss. Older chips
1133 * have FIFOs smaller than a packet, so you can't do this.
1134 * Turn on BCR18:BurstRdEn and BCR18:BurstWrEn.
1135 */
1136
1137 if (fset) {
1138 a->write_bcr(ioaddr, 18, (a->read_bcr(ioaddr, 18) | 0x0860));
1139 a->write_csr(ioaddr, 80,
1140 (a->read_csr(ioaddr, 80) & 0x0C00) | 0x0c00);
1141 dxsuflo = 1;
1142 }
1143
1144 dev = alloc_etherdev(0);
1145 if (!dev) {
1146 if (pcnet32_debug & NETIF_MSG_PROBE)
1147 printk(KERN_ERR PFX "Memory allocation failed.\n");
1148 ret = -ENOMEM;
1149 goto err_release_region;
1150 }
1151 SET_NETDEV_DEV(dev, &pdev->dev);
1152
1153 if (pcnet32_debug & NETIF_MSG_PROBE)
1154 printk(KERN_INFO PFX "%s at %#3lx,", chipname, ioaddr);
1155
1156 /* In most chips, after a chip reset, the ethernet address is read from the
1157 * station address PROM at the base address and programmed into the
1158 * "Physical Address Registers" CSR12-14.
1159 * As a precautionary measure, we read the PROM values and complain if
1160 * they disagree with the CSRs. If they miscompare, and the PROM addr
1161 * is valid, then the PROM addr is used.
1162 */
1163 for (i = 0; i < 3; i++) {
1164 unsigned int val;
1165 val = a->read_csr(ioaddr, i + 12) & 0x0ffff;
1166 /* There may be endianness issues here. */
1167 dev->dev_addr[2 * i] = val & 0x0ff;
1168 dev->dev_addr[2 * i + 1] = (val >> 8) & 0x0ff;
1169 }
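	/*
	 * For example, if CSR12 reads back 0x2211 then dev_addr[0] becomes
	 * 0x11 and dev_addr[1] becomes 0x22, i.e. the low byte of each CSR
	 * supplies the even-numbered address byte.
	 */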
1170
1171 /* read PROM address and compare with CSR address */
1172 for (i = 0; i < 6; i++)
1173 promaddr[i] = inb(ioaddr + i);
1174
1175 if (memcmp(promaddr, dev->dev_addr, 6)
1176 || !is_valid_ether_addr(dev->dev_addr)) {
1177 if (is_valid_ether_addr(promaddr)) {
1178 if (pcnet32_debug & NETIF_MSG_PROBE) {
1179 printk(" warning: CSR address invalid,\n");
1180 printk(KERN_INFO
1181 " using instead PROM address of");
1182 }
1183 memcpy(dev->dev_addr, promaddr, 6);
1184 }
1185 }
1186 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
1187
1188 /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
1189 if (!is_valid_ether_addr(dev->perm_addr))
1190 memset(dev->dev_addr, 0, sizeof(dev->dev_addr));
1191
1192 if (pcnet32_debug & NETIF_MSG_PROBE) {
1193 for (i = 0; i < 6; i++)
1194 printk(" %2.2x", dev->dev_addr[i]);
1195
1196 /* Version 0x2623 and 0x2624 */
1197 if (((chip_version + 1) & 0xfffe) == 0x2624) {
1198 i = a->read_csr(ioaddr, 80) & 0x0C00; /* Check tx_start_pt */
1199 printk("\n" KERN_INFO " tx_start_pt(0x%04x):", i);
1200 switch (i >> 10) {
1201 case 0:
1202 printk(" 20 bytes,");
1203 break;
1204 case 1:
1205 printk(" 64 bytes,");
1206 break;
1207 case 2:
1208 printk(" 128 bytes,");
1209 break;
1210 case 3:
1211 printk("~220 bytes,");
1212 break;
1213 }
1214 i = a->read_bcr(ioaddr, 18); /* Check Burst/Bus control */
1215 printk(" BCR18(%x):", i & 0xffff);
1216 if (i & (1 << 5))
1217 printk("BurstWrEn ");
1218 if (i & (1 << 6))
1219 printk("BurstRdEn ");
1220 if (i & (1 << 7))
1221 printk("DWordIO ");
1222 if (i & (1 << 11))
1223 printk("NoUFlow ");
1224 i = a->read_bcr(ioaddr, 25);
1225 printk("\n" KERN_INFO " SRAMSIZE=0x%04x,", i << 8);
1226 i = a->read_bcr(ioaddr, 26);
1227 printk(" SRAM_BND=0x%04x,", i << 8);
1228 i = a->read_bcr(ioaddr, 27);
1229 if (i & (1 << 14))
1230 printk("LowLatRx");
1231 }
1232 }
1233
1234 dev->base_addr = ioaddr;
1235 /* pci_alloc_consistent returns page-aligned memory, so we do not have to check the alignment */
1236 if ((lp =
1237 pci_alloc_consistent(pdev, sizeof(*lp), &lp_dma_addr)) == NULL) {
1238 if (pcnet32_debug & NETIF_MSG_PROBE)
1239 printk(KERN_ERR PFX
1240 "Consistent memory allocation failed.\n");
1241 ret = -ENOMEM;
1242 goto err_free_netdev;
1243 }
1244
1245 memset(lp, 0, sizeof(*lp));
1246 lp->dma_addr = lp_dma_addr;
1247 lp->pci_dev = pdev;
1248
1249 spin_lock_init(&lp->lock);
1250
1251 SET_MODULE_OWNER(dev);
1252 SET_NETDEV_DEV(dev, &pdev->dev);
1253 dev->priv = lp;
1254 lp->name = chipname;
1255 lp->shared_irq = shared;
1256 lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */
1257 lp->rx_ring_size = RX_RING_SIZE; /* default rx ring size */
1258 lp->tx_mod_mask = lp->tx_ring_size - 1;
1259 lp->rx_mod_mask = lp->rx_ring_size - 1;
1260 lp->tx_len_bits = (PCNET32_LOG_TX_BUFFERS << 12);
1261 lp->rx_len_bits = (PCNET32_LOG_RX_BUFFERS << 4);
1262 lp->mii_if.full_duplex = fdx;
1263 lp->mii_if.phy_id_mask = 0x1f;
1264 lp->mii_if.reg_num_mask = 0x1f;
1265 lp->dxsuflo = dxsuflo;
1266 lp->mii = mii;
1267 lp->msg_enable = pcnet32_debug;
1268 if ((cards_found >= MAX_UNITS)
1269     || (options[cards_found] >= sizeof(options_mapping)))
1270 lp->options = PCNET32_PORT_ASEL;
1271 else
1272 lp->options = options_mapping[options[cards_found]];
1273 lp->mii_if.dev = dev;
1274 lp->mii_if.mdio_read = mdio_read;
1275 lp->mii_if.mdio_write = mdio_write;
1276
1277 if (fdx && !(lp->options & PCNET32_PORT_ASEL) &&
1278 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
1279 lp->options |= PCNET32_PORT_FD;
1280
1281 if (!a) {
1282 if (pcnet32_debug & NETIF_MSG_PROBE)
1283 printk(KERN_ERR PFX "No access methods\n");
1284 ret = -ENODEV;
1285 goto err_free_consistent;
1286 }
1287 lp->a = *a;
1288
1289 /* prior to register_netdev, dev->name is not yet correct */
1290 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
1291 ret = -ENOMEM;
1292 goto err_free_ring;
1293 }
1294 /* detect special T1/E1 WAN card by checking for MAC address */
1295 if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0
1296 && dev->dev_addr[2] == 0x75)
1297 lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI;
1298
1299 lp->init_block.mode = le16_to_cpu(0x0003); /* Disable Rx and Tx. */
1300 lp->init_block.tlen_rlen =
1301 le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits);
1302 for (i = 0; i < 6; i++)
1303 lp->init_block.phys_addr[i] = dev->dev_addr[i];
1304 lp->init_block.filter[0] = 0x00000000;
1305 lp->init_block.filter[1] = 0x00000000;
1306 lp->init_block.rx_ring = (u32) le32_to_cpu(lp->rx_ring_dma_addr);
1307 lp->init_block.tx_ring = (u32) le32_to_cpu(lp->tx_ring_dma_addr);
1308
1309 /* switch pcnet32 to 32bit mode */
1310 a->write_bcr(ioaddr, 20, 2);
1311
1312 a->write_csr(ioaddr, 1, (lp->dma_addr + offsetof(struct pcnet32_private,
1313 init_block)) & 0xffff);
1314 a->write_csr(ioaddr, 2, (lp->dma_addr + offsetof(struct pcnet32_private,
1315 init_block)) >> 16);
1316
1317 if (pdev) { /* use the IRQ provided by PCI */
1318 dev->irq = pdev->irq;
1319 if (pcnet32_debug & NETIF_MSG_PROBE)
1320 printk(" assigned IRQ %d.\n", dev->irq);
1321 } else {
1322 unsigned long irq_mask = probe_irq_on();
1323
1324 /*
1325 * To auto-IRQ we enable the initialization-done and DMA error
1326 * interrupts. For ISA boards we get a DMA error, but VLB and PCI
1327 * boards will work.
1328 */
1329 /* Trigger an initialization just for the interrupt. */
1330 a->write_csr(ioaddr, 0, 0x41);
1331 mdelay(1);
1332
1333 dev->irq = probe_irq_off(irq_mask);
1334 if (!dev->irq) {
1335 if (pcnet32_debug & NETIF_MSG_PROBE)
1336 printk(", failed to detect IRQ line.\n");
1337 ret = -ENODEV;
1338 goto err_free_ring;
1339 }
1340 if (pcnet32_debug & NETIF_MSG_PROBE)
1341 printk(", probed IRQ %d.\n", dev->irq);
1342 }
1343
1344 /* Set the mii phy_id so that we can query the link state */
1345 if (lp->mii) {
1346 /* lp->phycount and lp->phymask are set to 0 by memset above */
1347
1348 lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
1349 /* scan for PHYs */
1350 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
1351 unsigned short id1, id2;
1352
1353 id1 = mdio_read(dev, i, MII_PHYSID1);
1354 if (id1 == 0xffff)
1355 continue;
1356 id2 = mdio_read(dev, i, MII_PHYSID2);
1357 if (id2 == 0xffff)
1358 continue;
1359 if (i == 31 && ((chip_version + 1) & 0xfffe) == 0x2624)
1360 continue; /* 79C971 & 79C972 have phantom phy at id 31 */
1361 lp->phycount++;
1362 lp->phymask |= (1 << i);
1363 lp->mii_if.phy_id = i;
1364 if (pcnet32_debug & NETIF_MSG_PROBE)
1365 printk(KERN_INFO PFX
1366 "Found PHY %04x:%04x at address %d.\n",
1367 id1, id2, i);
1368 }
1369 lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
1370 if (lp->phycount > 1) {
1371 lp->options |= PCNET32_PORT_MII;
1372 }
1373 }
1374
1375 init_timer(&lp->watchdog_timer);
1376 lp->watchdog_timer.data = (unsigned long)dev;
1377 lp->watchdog_timer.function = (void *)&pcnet32_watchdog;
1378
1379 /* The PCNET32-specific entries in the device structure. */
1380 dev->open = &pcnet32_open;
1381 dev->hard_start_xmit = &pcnet32_start_xmit;
1382 dev->stop = &pcnet32_close;
1383 dev->get_stats = &pcnet32_get_stats;
1384 dev->set_multicast_list = &pcnet32_set_multicast_list;
1385 dev->do_ioctl = &pcnet32_ioctl;
1386 dev->ethtool_ops = &pcnet32_ethtool_ops;
1387 dev->tx_timeout = pcnet32_tx_timeout;
1388 dev->watchdog_timeo = (5 * HZ);
1389
1390 #ifdef CONFIG_NET_POLL_CONTROLLER
1391 dev->poll_controller = pcnet32_poll_controller;
1392 #endif
1393
1394 /* Fill in the generic fields of the device structure. */
1395 if (register_netdev(dev))
1396 goto err_free_ring;
1397
1398 if (pdev) {
1399 pci_set_drvdata(pdev, dev);
1400 } else {
1401 lp->next = pcnet32_dev;
1402 pcnet32_dev = dev;
1403 }
1404
1405 if (pcnet32_debug & NETIF_MSG_PROBE)
1406 printk(KERN_INFO "%s: registered as %s\n", dev->name, lp->name);
1407 cards_found++;
1408
1409 /* enable LED writes */
1410 a->write_bcr(ioaddr, 2, a->read_bcr(ioaddr, 2) | 0x1000);
1411
1412 return 0;
1413
1414 err_free_ring:
1415 pcnet32_free_ring(dev);
1416 err_free_consistent:
1417 pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
1418 err_free_netdev:
1419 free_netdev(dev);
1420 err_release_region:
1421 release_region(ioaddr, PCNET32_TOTAL_SIZE);
1422 return ret;
1423 }
1424
1425 /* if any allocation fails, caller must also call pcnet32_free_ring */
1426 static int pcnet32_alloc_ring(struct net_device *dev, char *name)
1427 {
1428 struct pcnet32_private *lp = dev->priv;
1429
1430 lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
1431 sizeof(struct pcnet32_tx_head) *
1432 lp->tx_ring_size,
1433 &lp->tx_ring_dma_addr);
1434 if (lp->tx_ring == NULL) {
1435 if (pcnet32_debug & NETIF_MSG_DRV)
1436 printk("\n" KERN_ERR PFX
1437 "%s: Consistent memory allocation failed.\n",
1438 name);
1439 return -ENOMEM;
1440 }
1441
1442 lp->rx_ring = pci_alloc_consistent(lp->pci_dev,
1443 sizeof(struct pcnet32_rx_head) *
1444 lp->rx_ring_size,
1445 &lp->rx_ring_dma_addr);
1446 if (lp->rx_ring == NULL) {
1447 if (pcnet32_debug & NETIF_MSG_DRV)
1448 printk("\n" KERN_ERR PFX
1449 "%s: Consistent memory allocation failed.\n",
1450 name);
1451 return -ENOMEM;
1452 }
1453
1454 lp->tx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->tx_ring_size,
1455 GFP_ATOMIC);
1456 if (!lp->tx_dma_addr) {
1457 if (pcnet32_debug & NETIF_MSG_DRV)
1458 printk("\n" KERN_ERR PFX
1459 "%s: Memory allocation failed.\n", name);
1460 return -ENOMEM;
1461 }
1462 memset(lp->tx_dma_addr, 0, sizeof(dma_addr_t) * lp->tx_ring_size);
1463
1464 lp->rx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->rx_ring_size,
1465 GFP_ATOMIC);
1466 if (!lp->rx_dma_addr) {
1467 if (pcnet32_debug & NETIF_MSG_DRV)
1468 printk("\n" KERN_ERR PFX
1469 "%s: Memory allocation failed.\n", name);
1470 return -ENOMEM;
1471 }
1472 memset(lp->rx_dma_addr, 0, sizeof(dma_addr_t) * lp->rx_ring_size);
1473
1474 lp->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->tx_ring_size,
1475 GFP_ATOMIC);
1476 if (!lp->tx_skbuff) {
1477 if (pcnet32_debug & NETIF_MSG_DRV)
1478 printk("\n" KERN_ERR PFX
1479 "%s: Memory allocation failed.\n", name);
1480 return -ENOMEM;
1481 }
1482 memset(lp->tx_skbuff, 0, sizeof(struct sk_buff *) * lp->tx_ring_size);
1483
1484 lp->rx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->rx_ring_size,
1485 GFP_ATOMIC);
1486 if (!lp->rx_skbuff) {
1487 if (pcnet32_debug & NETIF_MSG_DRV)
1488 printk("\n" KERN_ERR PFX
1489 "%s: Memory allocation failed.\n", name);
1490 return -ENOMEM;
1491 }
1492 memset(lp->rx_skbuff, 0, sizeof(struct sk_buff *) * lp->rx_ring_size);
1493
1494 return 0;
1495 }
1496
1497 static void pcnet32_free_ring(struct net_device *dev)
1498 {
1499 struct pcnet32_private *lp = dev->priv;
1500
1501 kfree(lp->tx_skbuff);
1502 lp->tx_skbuff = NULL;
1503
1504 kfree(lp->rx_skbuff);
1505 lp->rx_skbuff = NULL;
1506
1507 kfree(lp->tx_dma_addr);
1508 lp->tx_dma_addr = NULL;
1509
1510 kfree(lp->rx_dma_addr);
1511 lp->rx_dma_addr = NULL;
1512
1513 if (lp->tx_ring) {
1514 pci_free_consistent(lp->pci_dev,
1515 sizeof(struct pcnet32_tx_head) *
1516 lp->tx_ring_size, lp->tx_ring,
1517 lp->tx_ring_dma_addr);
1518 lp->tx_ring = NULL;
1519 }
1520
1521 if (lp->rx_ring) {
1522 pci_free_consistent(lp->pci_dev,
1523 sizeof(struct pcnet32_rx_head) *
1524 lp->rx_ring_size, lp->rx_ring,
1525 lp->rx_ring_dma_addr);
1526 lp->rx_ring = NULL;
1527 }
1528 }
1529
1530 static int pcnet32_open(struct net_device *dev)
1531 {
1532 struct pcnet32_private *lp = dev->priv;
1533 unsigned long ioaddr = dev->base_addr;
1534 u16 val;
1535 int i;
1536 int rc;
1537 unsigned long flags;
1538
1539 if (request_irq(dev->irq, &pcnet32_interrupt,
1540 lp->shared_irq ? IRQF_SHARED : 0, dev->name,
1541 (void *)dev)) {
1542 return -EAGAIN;
1543 }
1544
1545 spin_lock_irqsave(&lp->lock, flags);
1546 /* Check for a valid station address */
1547 if (!is_valid_ether_addr(dev->dev_addr)) {
1548 rc = -EINVAL;
1549 goto err_free_irq;
1550 }
1551
1552 /* Reset the PCNET32 */
1553 lp->a.reset(ioaddr);
1554
1555 /* switch pcnet32 to 32bit mode */
1556 lp->a.write_bcr(ioaddr, 20, 2);
1557
1558 if (netif_msg_ifup(lp))
1559 printk(KERN_DEBUG
1560 "%s: pcnet32_open() irq %d tx/rx rings %#x/%#x init %#x.\n",
1561 dev->name, dev->irq, (u32) (lp->tx_ring_dma_addr),
1562 (u32) (lp->rx_ring_dma_addr),
1563 (u32) (lp->dma_addr +
1564 offsetof(struct pcnet32_private, init_block)));
1565
1566 /* set/reset autoselect bit */
1567 val = lp->a.read_bcr(ioaddr, 2) & ~2;
1568 if (lp->options & PCNET32_PORT_ASEL)
1569 val |= 2;
1570 lp->a.write_bcr(ioaddr, 2, val);
1571
1572 /* handle full duplex setting */
1573 if (lp->mii_if.full_duplex) {
1574 val = lp->a.read_bcr(ioaddr, 9) & ~3;
1575 if (lp->options & PCNET32_PORT_FD) {
1576 val |= 1;
1577 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
1578 val |= 2;
1579 } else if (lp->options & PCNET32_PORT_ASEL) {
1580 /* workaround of xSeries250, turn on for 79C975 only */
1581 i = ((lp->a.read_csr(ioaddr, 88) |
1582      (lp->a.read_csr(ioaddr, 89) << 16))
1583     >> 12) & 0xffff;
1584 if (i == 0x2627)
1585 val |= 3;
1586 }
1587 lp->a.write_bcr(ioaddr, 9, val);
1588 }
1589
1590 /* set/reset GPSI bit in test register */
1591 val = lp->a.read_csr(ioaddr, 124) & ~0x10;
1592 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
1593 val |= 0x10;
1594 lp->a.write_csr(ioaddr, 124, val);
1595
1596 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
1597 if (lp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_AT &&
1598 (lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX ||
1599 lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) {
1600 if (lp->options & PCNET32_PORT_ASEL) {
1601 lp->options = PCNET32_PORT_FD | PCNET32_PORT_100;
1602 if (netif_msg_link(lp))
1603 printk(KERN_DEBUG
1604 "%s: Setting 100Mb-Full Duplex.\n",
1605 dev->name);
1606 }
1607 }
1608 if (lp->phycount < 2) {
1609 /*
1610 * 24 Jun 2004: according to AMD, in order to change the PHY,
1611 * DANAS (or DISPM for 79C976) must be set; then select the speed,
1612 * duplex, and/or enable auto negotiation, and clear DANAS
1613 */
1614 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
1615 lp->a.write_bcr(ioaddr, 32,
1616 lp->a.read_bcr(ioaddr, 32) | 0x0080);
1617 /* disable Auto Negotiation, set 10Mbps, HD */
1618 val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
1619 if (lp->options & PCNET32_PORT_FD)
1620 val |= 0x10;
1621 if (lp->options & PCNET32_PORT_100)
1622 val |= 0x08;
1623 lp->a.write_bcr(ioaddr, 32, val);
1624 } else {
1625 if (lp->options & PCNET32_PORT_ASEL) {
1626 lp->a.write_bcr(ioaddr, 32,
1627 lp->a.read_bcr(ioaddr,
1628 32) | 0x0080);
1629 /* enable auto negotiate, setup, disable fd */
1630 val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
1631 val |= 0x20;
1632 lp->a.write_bcr(ioaddr, 32, val);
1633 }
1634 }
1635 } else {
1636 int first_phy = -1;
1637 u16 bmcr;
1638 u32 bcr9;
1639 struct ethtool_cmd ecmd;
1640
1641 /*
1642 * There is really no other good way to handle multiple PHYs
1643 * other than turning off all automatics
1644 */
1645 val = lp->a.read_bcr(ioaddr, 2);
1646 lp->a.write_bcr(ioaddr, 2, val & ~2);
1647 val = lp->a.read_bcr(ioaddr, 32);
1648 lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
1649
1650 if (!(lp->options & PCNET32_PORT_ASEL)) {
1651 /* setup ecmd */
1652 ecmd.port = PORT_MII;
1653 ecmd.transceiver = XCVR_INTERNAL;
1654 ecmd.autoneg = AUTONEG_DISABLE;
1655 ecmd.speed =
1656     lp->options & PCNET32_PORT_100 ?
1657     SPEED_100 : SPEED_10;
1658 bcr9 = lp->a.read_bcr(ioaddr, 9);
1659
1660 if (lp->options & PCNET32_PORT_FD) {
1661 ecmd.duplex = DUPLEX_FULL;
1662 bcr9 |= (1 << 0);
1663 } else {
1664 ecmd.duplex = DUPLEX_HALF;
1665 bcr9 &= ~(1 << 0);
1666 }
1667 lp->a.write_bcr(ioaddr, 9, bcr9);
1668 }
1669
1670 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
1671 if (lp->phymask & (1 << i)) {
1672 /* isolate all but the first PHY */
1673 bmcr = mdio_read(dev, i, MII_BMCR);
1674 if (first_phy == -1) {
1675 first_phy = i;
1676 mdio_write(dev, i, MII_BMCR,
1677 bmcr & ~BMCR_ISOLATE);
1678 } else {
1679 mdio_write(dev, i, MII_BMCR,
1680 bmcr | BMCR_ISOLATE);
1681 }
1682 /* use mii_ethtool_sset to setup PHY */
1683 lp->mii_if.phy_id = i;
1684 ecmd.phy_address = i;
1685 if (lp->options & PCNET32_PORT_ASEL) {
1686 mii_ethtool_gset(&lp->mii_if, &ecmd);
1687 ecmd.autoneg = AUTONEG_ENABLE;
1688 }
1689 mii_ethtool_sset(&lp->mii_if, &ecmd);
1690 }
1691 }
1692 lp->mii_if.phy_id = first_phy;
1693 if (netif_msg_link(lp))
1694 printk(KERN_INFO "%s: Using PHY number %d.\n",
1695 dev->name, first_phy);
1696 }
1697
1698 #ifdef DO_DXSUFLO
1699 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
1700 val = lp->a.read_csr(ioaddr, 3);
1701 val |= 0x40;
1702 lp->a.write_csr(ioaddr, 3, val);
1703 }
1704 #endif
1705
1706 lp->init_block.mode =
1707 le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7);
1708 pcnet32_load_multicast(dev);
1709
1710 if (pcnet32_init_ring(dev)) {
1711 rc = -ENOMEM;
1712 goto err_free_ring;
1713 }
1714
1715 /* Re-initialize the PCNET32, and start it when done. */
1716 lp->a.write_csr(ioaddr, 1, (lp->dma_addr +
1717 offsetof(struct pcnet32_private,
1718 init_block)) & 0xffff);
1719 lp->a.write_csr(ioaddr, 2,
1720 (lp->dma_addr +
1721 offsetof(struct pcnet32_private, init_block)) >> 16);
1722
1723 lp->a.write_csr(ioaddr, 4, 0x0915);
1724 lp->a.write_csr(ioaddr, 0, 0x0001);
1725
1726 netif_start_queue(dev);
1727
1728 /* Print the link status and start the watchdog */
1729 pcnet32_check_media(dev, 1);
1730 mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT);
1731
1732 i = 0;
1733 while (i++ < 100)
1734 if (lp->a.read_csr(ioaddr, 0) & 0x0100)
1735 break;
1736 /*
1737 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
1738 * reports that doing so triggers a bug in the '974.
1739 */
1740 lp->a.write_csr(ioaddr, 0, 0x0042);
1741
1742 if (netif_msg_ifup(lp))
1743 printk(KERN_DEBUG
1744 "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
1745 dev->name, i,
1746 (u32) (lp->dma_addr +
1747 offsetof(struct pcnet32_private, init_block)),
1748 lp->a.read_csr(ioaddr, 0));
1749
1750 spin_unlock_irqrestore(&lp->lock, flags);
1751
1752 return 0; /* Always succeed */
1753
1754 err_free_ring:
1755 /* free any allocated skbuffs */
1756 for (i = 0; i < lp->rx_ring_size; i++) {
1757 lp->rx_ring[i].status = 0;
1758 if (lp->rx_skbuff[i]) {
1759 pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i],
1760 PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
1761 dev_kfree_skb(lp->rx_skbuff[i]);
1762 }
1763 lp->rx_skbuff[i] = NULL;
1764 lp->rx_dma_addr[i] = 0;
1765 }
1766
1767 /*
1768 * Switch back to 16bit mode to avoid problems with dumb
1769 * DOS packet driver after a warm reboot
1770 */
1771 lp->a.write_bcr(ioaddr, 20, 4);
1772
1773 err_free_irq:
1774 spin_unlock_irqrestore(&lp->lock, flags);
1775 free_irq(dev->irq, dev);
1776 return rc;
1777 }
1778
1779 /*
1780 * The LANCE has been halted for one reason or another (busmaster memory
1781 * arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
1782 * etc.). Modern LANCE variants always reload their ring-buffer
1783 * configuration when restarted, so we must reinitialize our ring
1784 * context before restarting. As part of this reinitialization,
1785 * find all packets still on the Tx ring and pretend that they had been
1786 * sent (in effect, drop the packets on the floor) - the higher-level
1787 * protocols will time out and retransmit. It'd be better to shuffle
1788 * these skbs to a temp list and then actually re-Tx them after
1789 * restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com
1790 */
1791
1792 static void pcnet32_purge_tx_ring(struct net_device *dev)
1793 {
1794 struct pcnet32_private *lp = dev->priv;
1795 int i;
1796
1797 for (i = 0; i < lp->tx_ring_size; i++) {
1798 lp->tx_ring[i].status = 0; /* CPU owns buffer */
1799 wmb(); /* Make sure adapter sees owner change */
1800 if (lp->tx_skbuff[i]) {
1801 pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i],
1802 lp->tx_skbuff[i]->len,
1803 PCI_DMA_TODEVICE);
1804 dev_kfree_skb_any(lp->tx_skbuff[i]);
1805 }
1806 lp->tx_skbuff[i] = NULL;
1807 lp->tx_dma_addr[i] = 0;
1808 }
1809 }
1810
1811 /* Initialize the PCNET32 Rx and Tx rings. */
1812 static int pcnet32_init_ring(struct net_device *dev)
1813 {
1814 struct pcnet32_private *lp = dev->priv;
1815 int i;
1816
1817 lp->tx_full = 0;
1818 lp->cur_rx = lp->cur_tx = 0;
1819 lp->dirty_rx = lp->dirty_tx = 0;
1820
1821 for (i = 0; i < lp->rx_ring_size; i++) {
1822 struct sk_buff *rx_skbuff = lp->rx_skbuff[i];
1823 if (rx_skbuff == NULL) {
1824 rx_skbuff = lp->rx_skbuff[i] =
1825     dev_alloc_skb(PKT_BUF_SZ);
1826 if (!rx_skbuff) {
1827 /* there is not much we can do at this point */
1828 if (pcnet32_debug & NETIF_MSG_DRV)
1829 printk(KERN_ERR
1830 "%s: pcnet32_init_ring dev_alloc_skb failed.\n",
1831 dev->name);
1832 return -1;
1833 }
1834 skb_reserve(rx_skbuff, 2);
1835 }
1836
1837 rmb();
1838 if (lp->rx_dma_addr[i] == 0)
1839 lp->rx_dma_addr[i] =
1840 pci_map_single(lp->pci_dev, rx_skbuff->data,
1841 PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
1842 lp->rx_ring[i].base = (u32) le32_to_cpu(lp->rx_dma_addr[i]);
1843 lp->rx_ring[i].buf_length = le16_to_cpu(2 - PKT_BUF_SZ);
1844 wmb(); /* Make sure owner changes after all others are visible */
1845 lp->rx_ring[i].status = le16_to_cpu(0x8000);
1846 }
1847 /* The Tx buffer address is filled in as needed, but we do need to clear
1848 * the upper ownership bit. */
1849 for (i = 0; i < lp->tx_ring_size; i++) {
1850 lp->tx_ring[i].status = 0; /* CPU owns buffer */
1851 wmb(); /* Make sure adapter sees owner change */
1852 lp->tx_ring[i].base = 0;
1853 lp->tx_dma_addr[i] = 0;
1854 }
1855
1856 lp->init_block.tlen_rlen =
1857 le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits);
1858 for (i = 0; i < 6; i++)
1859 lp->init_block.phys_addr[i] = dev->dev_addr[i];
1860 lp->init_block.rx_ring = (u32) le32_to_cpu(lp->rx_ring_dma_addr);
1861 lp->init_block.tx_ring = (u32) le32_to_cpu(lp->tx_ring_dma_addr);
1862 wmb(); /* Make sure all changes are visible */
1863 return 0;
1864 }
1865
1866 /* The pcnet32 has been issued a stop or reset. Wait for the stop bit,
1867 * then flush the pending transmit operations, re-initialize the ring,
1868 * and tell the chip to initialize.
1869 */
1870 static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
1871 {
1872 struct pcnet32_private *lp = dev->priv;
1873 unsigned long ioaddr = dev->base_addr;
1874 int i;
1875
1876 /* wait for stop */
1877 for (i = 0; i < 100; i++)
1878 if (lp->a.read_csr(ioaddr, 0) & 0x0004)
1879 break;
1880
1881 if (i >= 100 && netif_msg_drv(lp))
1882 printk(KERN_ERR
1883 "%s: pcnet32_restart timed out waiting for stop.\n",
1884 dev->name);
1885
1886 pcnet32_purge_tx_ring(dev);
1887 if (pcnet32_init_ring(dev))
1888 return;
1889
1890 /* ReInit Ring */
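/* Writing INIT (CSR0 bit 0) makes the chip re-read the init block; the loop
 * below then polls for IDON (bit 8, initialization done). */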
1891 lp->a.write_csr(ioaddr, 0, 1);
1892 i = 0;
1893 while (i++ < 1000)
1894 if (lp->a.read_csr(ioaddr, 0) & 0x0100)
1895 break;
1896
1897 lp->a.write_csr(ioaddr, 0, csr0_bits);
1898 }
1899
1900 static void pcnet32_tx_timeout(struct net_device *dev)
1901 {
1902 struct pcnet32_private *lp = dev->priv;
1903 unsigned long ioaddr = dev->base_addr, flags;
1904
1905 spin_lock_irqsave(&lp->lock, flags);
1906 /* Transmitter timeout, serious problems. */
1907 if (pcnet32_debug & NETIF_MSG_DRV)
1908 printk(KERN_ERR
1909 "%s: transmit timed out, status %4.4x, resetting.\n",
1910 dev->name, lp->a.read_csr(ioaddr, 0));
1911 lp->a.write_csr(ioaddr, 0, 0x0004);
1912 lp->stats.tx_errors++;
1913 if (netif_msg_tx_err(lp)) {
1914 int i;
1915 printk(KERN_DEBUG
1916 " Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
1917 lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
1918 lp->cur_rx);
1919 for (i = 0; i < lp->rx_ring_size; i++)
1920 printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
1921 le32_to_cpu(lp->rx_ring[i].base),
1922 (-le16_to_cpu(lp->rx_ring[i].buf_length)) &
1923 0xffff, le32_to_cpu(lp->rx_ring[i].msg_length),
1924 le16_to_cpu(lp->rx_ring[i].status));
1925 for (i = 0; i < lp->tx_ring_size; i++)
1926 printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
1927 le32_to_cpu(lp->tx_ring[i].base),
1928 (-le16_to_cpu(lp->tx_ring[i].length)) & 0xffff,
1929 le32_to_cpu(lp->tx_ring[i].misc),
1930 le16_to_cpu(lp->tx_ring[i].status));
1931 printk("\n");
1932 }
1933 pcnet32_restart(dev, 0x0042);
1934
1935 dev->trans_start = jiffies;
1936 netif_wake_queue(dev);
1937
1938 spin_unlock_irqrestore(&lp->lock, flags);
1939 }
1940
1941 static int pcnet32_start_xmit(struct sk_buff *skb, struct net_device *dev)
1942 {
1943 struct pcnet32_private *lp = dev->priv;
1944 unsigned long ioaddr = dev->base_addr;
1945 u16 status;
1946 int entry;
1947 unsigned long flags;
1948
1949 spin_lock_irqsave(&lp->lock, flags);
1950
1951 if (netif_msg_tx_queued(lp)) {
1952 printk(KERN_DEBUG
1953 "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
1954 dev->name, lp->a.read_csr(ioaddr, 0));
1955 }
1956
1957 /* Default status -- will not enable Successful-TxDone
1958 * interrupt when that option is available to us.
1959 */
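/* 0x8300 = OWN | STP | ENP in the Tx descriptor status word: hand the
 * descriptor to the chip, with the frame starting and ending in this buffer. */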
1960 status = 0x8300;
1961
1962 /* Fill in a Tx ring entry */
1963
1964 /* Mask to ring buffer boundary. */
1965 entry = lp->cur_tx & lp->tx_mod_mask;
1966
1967 /* Caution: the write order is important here, set the status
1968 * with the "ownership" bits last. */
1969
1970 lp->tx_ring[entry].length = le16_to_cpu(-skb->len);
1971
1972 lp->tx_ring[entry].misc = 0x00000000;
1973
1974 lp->tx_skbuff[entry] = skb;
1975 lp->tx_dma_addr[entry] =
1976 pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
1977 lp->tx_ring[entry].base = (u32) le32_to_cpu(lp->tx_dma_addr[entry]);
1978 wmb(); /* Make sure owner changes after all others are visible */
1979 lp->tx_ring[entry].status = le16_to_cpu(status);
1980
1981 lp->cur_tx++;
1982 lp->stats.tx_bytes += skb->len;
1983
1984 /* Trigger an immediate send poll. */
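/* CSR0 0x0048 = IENA | TDMD: keep interrupts enabled and demand an
 * immediate transmit poll. */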
1985 lp->a.write_csr(ioaddr, 0, 0x0048);
1986
1987 dev->trans_start = jiffies;
1988
1989 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
1990 lp->tx_full = 1;
1991 netif_stop_queue(dev);
1992 }
1993 spin_unlock_irqrestore(&lp->lock, flags);
1994 return 0;
1995 }
1996
1997 /* The PCNET32 interrupt handler. */
1998 static irqreturn_t
1999 pcnet32_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2000 {
2001 struct net_device *dev = dev_id;
2002 struct pcnet32_private *lp;
2003 unsigned long ioaddr;
2004 u16 csr0, rap;
2005 int boguscnt = max_interrupt_work;
2006 int must_restart;
2007
2008 if (!dev) {
2009 if (pcnet32_debug & NETIF_MSG_INTR)
2010 printk(KERN_DEBUG "%s(): irq %d for unknown device\n",
2011 __FUNCTION__, irq);
2012 return IRQ_NONE;
2013 }
2014
2015 ioaddr = dev->base_addr;
2016 lp = dev->priv;
2017
2018 spin_lock(&lp->lock);
2019
2020 rap = lp->a.read_rap(ioaddr);
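/* Loop while any of ERR, MERR, RINT, TINT or IDON (mask 0x8f00) is set in
 * CSR0. Writing the read value back acknowledges those status bits; the
 * ~0x004f mask avoids touching the INIT/STRT/STOP/TDMD/IENA control bits. */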
2021 while ((csr0 = lp->a.read_csr(ioaddr, 0)) & 0x8f00 && --boguscnt >= 0) {
2022 if (csr0 == 0xffff) {
2023 break; /* PCMCIA remove happened */
2024 }
2025 /* Acknowledge all of the current interrupt sources ASAP. */
2026 lp->a.write_csr(ioaddr, 0, csr0 & ~0x004f);
2027
2028 must_restart = 0;
2029
2030 if (netif_msg_intr(lp))
2031 printk(KERN_DEBUG
2032 "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
2033 dev->name, csr0, lp->a.read_csr(ioaddr, 0));
2034
2035 if (csr0 & 0x0400) /* Rx interrupt */
2036 pcnet32_rx(dev);
2037
2038 if (csr0 & 0x0200) { /* Tx-done interrupt */
2039 unsigned int dirty_tx = lp->dirty_tx;
2040 int delta;
2041
2042 while (dirty_tx != lp->cur_tx) {
2043 int entry = dirty_tx & lp->tx_mod_mask;
2044 int status =
2045 (short)le16_to_cpu(lp->tx_ring[entry].status);
2047
2048 if (status < 0)
2049 break; /* It still hasn't been Txed */
2050
2051 lp->tx_ring[entry].base = 0;
2052
2053 if (status & 0x4000) {
2054 /* There was a major error, log it. */
2055 int err_status =
2056 le32_to_cpu(lp->tx_ring[entry].misc);
2058 lp->stats.tx_errors++;
2059 if (netif_msg_tx_err(lp))
2060 printk(KERN_ERR
2061 "%s: Tx error status=%04x err_status=%08x\n",
2062 dev->name, status,
2063 err_status);
2064 if (err_status & 0x04000000)
2065 lp->stats.tx_aborted_errors++;
2066 if (err_status & 0x08000000)
2067 lp->stats.tx_carrier_errors++;
2068 if (err_status & 0x10000000)
2069 lp->stats.tx_window_errors++;
2070 #ifndef DO_DXSUFLO
2071 if (err_status & 0x40000000) {
2072 lp->stats.tx_fifo_errors++;
2073 /* Ackk! On FIFO errors the Tx unit is turned off! */
2074 /* Remove this verbosity later! */
2075 if (netif_msg_tx_err(lp))
2076 printk(KERN_ERR
2077 "%s: Tx FIFO error! CSR0=%4.4x\n",
2078 dev->name, csr0);
2079 must_restart = 1;
2080 }
2081 #else
2082 if (err_status & 0x40000000) {
2083 lp->stats.tx_fifo_errors++;
2084 if (!lp->dxsuflo) { /* If controller doesn't recover ... */
2085 /* Ackk! On FIFO errors the Tx unit is turned off! */
2086 /* Remove this verbosity later! */
2087 if (netif_msg_tx_err(lp))
2089 printk(KERN_ERR
2090 "%s: Tx FIFO error! CSR0=%4.4x\n",
2091 dev->name, csr0);
2094 must_restart = 1;
2095 }
2096 }
2097 #endif
2098 } else {
2099 if (status & 0x1800)
2100 lp->stats.collisions++;
2101 lp->stats.tx_packets++;
2102 }
2103
2104 /* We must free the original skb */
2105 if (lp->tx_skbuff[entry]) {
2106 pci_unmap_single(lp->pci_dev,
2107 lp->tx_dma_addr[entry],
2108 lp->tx_skbuff[entry]->len,
2109 PCI_DMA_TODEVICE);
2110 dev_kfree_skb_irq(lp->tx_skbuff[entry]);
2111 lp->tx_skbuff[entry] = NULL;
2112 lp->tx_dma_addr[entry] = 0;
2113 }
2114 dirty_tx++;
2115 }
2116
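/* cur_tx and dirty_tx are free-running counters; masking with
 * (tx_mod_mask + tx_ring_size), i.e. 2 * ring size - 1, lets us detect a
 * dirty pointer that has fallen a full ring length behind. */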
2117 delta =
2118 (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask +
2119 lp->tx_ring_size);
2120 if (delta > lp->tx_ring_size) {
2121 if (netif_msg_drv(lp))
2122 printk(KERN_ERR
2123 "%s: out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
2124 dev->name, dirty_tx, lp->cur_tx,
2125 lp->tx_full);
2126 dirty_tx += lp->tx_ring_size;
2127 delta -= lp->tx_ring_size;
2128 }
2129
2130 if (lp->tx_full &&
2131 netif_queue_stopped(dev) &&
2132 delta < lp->tx_ring_size - 2) {
2133 /* The ring is no longer full, clear tbusy. */
2134 lp->tx_full = 0;
2135 netif_wake_queue(dev);
2136 }
2137 lp->dirty_tx = dirty_tx;
2138 }
2139
2140 /* Log misc errors. */
2141 if (csr0 & 0x4000)
2142 lp->stats.tx_errors++; /* Tx babble. */
2143 if (csr0 & 0x1000) {
2144 /*
2145 * This happens when our receive ring is full. That shouldn't be
2146 * a problem, as we will see normal rx interrupts for the frames
2147 * in the receive ring. But some PCI chipsets (I can reproduce
2148 * this on an SP3G with the Intel Saturn chipset) occasionally
2149 * have problems and fill up the receive ring with error
2150 * descriptors. In that situation we don't get an rx interrupt,
2151 * only a missed-frame interrupt sooner or later, so we try to
2152 * clean up our receive ring here.
2153 */
2154 pcnet32_rx(dev);
2155 lp->stats.rx_errors++; /* Missed a Rx frame. */
2156 }
2157 if (csr0 & 0x0800) {
2158 if (netif_msg_drv(lp))
2159 printk(KERN_ERR
2160 "%s: Bus master arbitration failure, status %4.4x.\n",
2161 dev->name, csr0);
2162 /* unlike for the lance, there is no restart needed */
2163 }
2164
2165 if (must_restart) {
2166 /* reset the chip to clear the error condition, then restart */
2167 lp->a.reset(ioaddr);
2168 lp->a.write_csr(ioaddr, 4, 0x0915);
2169 pcnet32_restart(dev, 0x0002);
2170 netif_wake_queue(dev);
2171 }
2172 }
2173
2174 /* Set interrupt enable. */
2175 lp->a.write_csr(ioaddr, 0, 0x0040);
2176 lp->a.write_rap(ioaddr, rap);
2177
2178 if (netif_msg_intr(lp))
2179 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
2180 dev->name, lp->a.read_csr(ioaddr, 0));
2181
2182 spin_unlock(&lp->lock);
2183
2184 return IRQ_HANDLED;
2185 }
2186
2187 static int pcnet32_rx(struct net_device *dev)
2188 {
2189 struct pcnet32_private *lp = dev->priv;
2190 int entry = lp->cur_rx & lp->rx_mod_mask;
2191 int boguscnt = lp->rx_ring_size / 2;
2192
2193 /* If we own the next entry, it's a new packet. Send it up. */
2194 while ((short)le16_to_cpu(lp->rx_ring[entry].status) >= 0) {
2195 int status = (short)le16_to_cpu(lp->rx_ring[entry].status) >> 8;
2196
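/* The high byte of RMD1 is now in 'status'; exactly 0x03 (STP | ENP) with
 * no error bits set means a complete frame held in a single buffer. */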
2197 if (status != 0x03) { /* There was an error. */
2198 /*
2199 * There is a tricky error noted by John Murphy,
2200 * <murf@perftech.com> to Russ Nelson: Even with full-sized
2201 * buffers it's possible for a jabber packet to use two
2202 * buffers, with only the last correctly noting the error.
2203 */
2204 if (status & 0x01) /* Only count a general error at the */
2205 lp->stats.rx_errors++; /* end of a packet. */
2206 if (status & 0x20)
2207 lp->stats.rx_frame_errors++;
2208 if (status & 0x10)
2209 lp->stats.rx_over_errors++;
2210 if (status & 0x08)
2211 lp->stats.rx_crc_errors++;
2212 if (status & 0x04)
2213 lp->stats.rx_fifo_errors++;
2214 lp->rx_ring[entry].status &= le16_to_cpu(0x03ff);
2215 } else {
2216 /* Malloc up new buffer, compatible with net-2e. */
2217 short pkt_len =
2218 (le32_to_cpu(lp->rx_ring[entry].msg_length) & 0xfff)
2219 - 4;
2220 struct sk_buff *skb;
2221
2222 /* Discard oversize frames. */
2223 if (unlikely(pkt_len > PKT_BUF_SZ - 2)) {
2224 if (netif_msg_drv(lp))
2225 printk(KERN_ERR
2226 "%s: Impossible packet size %d!\n",
2227 dev->name, pkt_len);
2228 lp->stats.rx_errors++;
2229 } else if (pkt_len < 60) {
2230 if (netif_msg_rx_err(lp))
2231 printk(KERN_ERR "%s: Runt packet!\n",
2232 dev->name);
2233 lp->stats.rx_errors++;
2234 } else {
2235 int rx_in_place = 0;
2236
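/* Frames larger than rx_copybreak are passed up in the ring buffer itself
 * and a fresh skb is hooked into the ring; smaller frames are copied into a
 * right-sized skb so the full-sized buffer stays in the ring. */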
2237 if (pkt_len > rx_copybreak) {
2238 struct sk_buff *newskb;
2239
2240 if ((newskb =
2241 dev_alloc_skb(PKT_BUF_SZ))) {
2242 skb_reserve(newskb, 2);
2243 skb = lp->rx_skbuff[entry];
2244 pci_unmap_single(lp->pci_dev,
2245 lp->rx_dma_addr[entry],
2248 PKT_BUF_SZ - 2,
2249 PCI_DMA_FROMDEVICE);
2250 skb_put(skb, pkt_len);
2251 lp->rx_skbuff[entry] = newskb;
2252 newskb->dev = dev;
2253 lp->rx_dma_addr[entry] =
2254 pci_map_single(lp->pci_dev,
2255 newskb->data,
2256 PKT_BUF_SZ -
2257 2,
2258 PCI_DMA_FROMDEVICE);
2259 lp->rx_ring[entry].base =
2260 le32_to_cpu(lp->rx_dma_addr[entry]);
2263 rx_in_place = 1;
2264 } else
2265 skb = NULL;
2266 } else {
2267 skb = dev_alloc_skb(pkt_len + 2);
2268 }
2269
2270 if (skb == NULL) {
2271 int i;
2272 if (netif_msg_drv(lp))
2273 printk(KERN_ERR
2274 "%s: Memory squeeze, deferring packet.\n",
2275 dev->name);
2276 for (i = 0; i < lp->rx_ring_size; i++)
2277 if ((short)le16_to_cpu(lp->rx_ring[(entry + i)
2281 & lp->rx_mod_mask].status) < 0)
2284 break;
2285
2286 if (i > lp->rx_ring_size - 2) {
2287 lp->stats.rx_dropped++;
2288 lp->rx_ring[entry].status |=
2289 le16_to_cpu(0x8000);
2290 wmb(); /* Make sure adapter sees owner change */
2291 lp->cur_rx++;
2292 }
2293 break;
2294 }
2295 skb->dev = dev;
2296 if (!rx_in_place) {
2297 skb_reserve(skb, 2); /* 16 byte align */
2298 skb_put(skb, pkt_len); /* Make room */
2299 pci_dma_sync_single_for_cpu(lp->pci_dev,
2300 lp->rx_dma_addr[entry],
2303 PKT_BUF_SZ - 2,
2305 PCI_DMA_FROMDEVICE);
2306 eth_copy_and_sum(skb,
2307 (unsigned char *)(lp->rx_skbuff[entry]->data),
2311 pkt_len, 0);
2312 pci_dma_sync_single_for_device(lp->pci_dev,
2314 lp->rx_dma_addr[entry],
2317 PKT_BUF_SZ - 2,
2319 PCI_DMA_FROMDEVICE);
2320 }
2321 lp->stats.rx_bytes += skb->len;
2322 skb->protocol = eth_type_trans(skb, dev);
2323 netif_rx(skb);
2324 dev->last_rx = jiffies;
2325 lp->stats.rx_packets++;
2326 }
2327 }
2328 /*
2329 * The docs say that the buffer length isn't touched, but Andrew Boyd
2330 * of QNX reports that some revs of the 79C965 clear it.
2331 */
2332 lp->rx_ring[entry].buf_length = le16_to_cpu(2 - PKT_BUF_SZ);
2333 wmb(); /* Make sure owner changes after all others are visible */
2334 lp->rx_ring[entry].status |= le16_to_cpu(0x8000);
2335 entry = (++lp->cur_rx) & lp->rx_mod_mask;
2336 if (--boguscnt <= 0)
2337 break; /* don't stay in loop forever */
2338 }
2339
2340 return 0;
2341 }
2342
2343 static int pcnet32_close(struct net_device *dev)
2344 {
2345 unsigned long ioaddr = dev->base_addr;
2346 struct pcnet32_private *lp = dev->priv;
2347 int i;
2348 unsigned long flags;
2349
2350 del_timer_sync(&lp->watchdog_timer);
2351
2352 netif_stop_queue(dev);
2353
2354 spin_lock_irqsave(&lp->lock, flags);
2355
2356 lp->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
2357
2358 if (netif_msg_ifdown(lp))
2359 printk(KERN_DEBUG
2360 "%s: Shutting down ethercard, status was %2.2x.\n",
2361 dev->name, lp->a.read_csr(ioaddr, 0));
2362
2363 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
2364 lp->a.write_csr(ioaddr, 0, 0x0004);
2365
2366 /*
2367 * Switch back to 16bit mode to avoid problems with dumb
2368 * DOS packet driver after a warm reboot
2369 */
2370 lp->a.write_bcr(ioaddr, 20, 4);
2371
2372 spin_unlock_irqrestore(&lp->lock, flags);
2373
2374 free_irq(dev->irq, dev);
2375
2376 spin_lock_irqsave(&lp->lock, flags);
2377
2378 /* free all allocated skbuffs */
2379 for (i = 0; i < lp->rx_ring_size; i++) {
2380 lp->rx_ring[i].status = 0;
2381 wmb(); /* Make sure adapter sees owner change */
2382 if (lp->rx_skbuff[i]) {
2383 pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i],
2384 PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
2385 dev_kfree_skb(lp->rx_skbuff[i]);
2386 }
2387 lp->rx_skbuff[i] = NULL;
2388 lp->rx_dma_addr[i] = 0;
2389 }
2390
2391 for (i = 0; i < lp->tx_ring_size; i++) {
2392 lp->tx_ring[i].status = 0; /* CPU owns buffer */
2393 wmb(); /* Make sure adapter sees owner change */
2394 if (lp->tx_skbuff[i]) {
2395 pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i],
2396 lp->tx_skbuff[i]->len,
2397 PCI_DMA_TODEVICE);
2398 dev_kfree_skb(lp->tx_skbuff[i]);
2399 }
2400 lp->tx_skbuff[i] = NULL;
2401 lp->tx_dma_addr[i] = 0;
2402 }
2403
2404 spin_unlock_irqrestore(&lp->lock, flags);
2405
2406 return 0;
2407 }
2408
2409 static struct net_device_stats *pcnet32_get_stats(struct net_device *dev)
2410 {
2411 struct pcnet32_private *lp = dev->priv;
2412 unsigned long ioaddr = dev->base_addr;
2413 u16 saved_addr;
2414 unsigned long flags;
2415
2416 spin_lock_irqsave(&lp->lock, flags);
2417 saved_addr = lp->a.read_rap(ioaddr);
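/* CSR112 is the chip's missed-frame counter. */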
2418 lp->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
2419 lp->a.write_rap(ioaddr, saved_addr);
2420 spin_unlock_irqrestore(&lp->lock, flags);
2421
2422 return &lp->stats;
2423 }
2424
2425 /* taken from the sunlance driver, which in turn took it from the depca driver */
2426 static void pcnet32_load_multicast(struct net_device *dev)
2427 {
2428 struct pcnet32_private *lp = dev->priv;
2429 volatile struct pcnet32_init_block *ib = &lp->init_block;
2430 volatile u16 *mcast_table = (u16 *) & ib->filter;
2431 struct dev_mc_list *dmi = dev->mc_list;
2432 char *addrs;
2433 int i;
2434 u32 crc;
2435
2436 /* set all multicast bits */
2437 if (dev->flags & IFF_ALLMULTI) {
2438 ib->filter[0] = 0xffffffff;
2439 ib->filter[1] = 0xffffffff;
2440 return;
2441 }
2442 /* clear the multicast filter */
2443 ib->filter[0] = 0;
2444 ib->filter[1] = 0;
2445
2446 /* Add addresses */
2447 for (i = 0; i < dev->mc_count; i++) {
2448 addrs = dmi->dmi_addr;
2449 dmi = dmi->next;
2450
2451 /* multicast address? */
2452 if (!(*addrs & 1))
2453 continue;
2454
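/* Hash the address with the Ethernet CRC-32; the upper 6 bits of the
 * little-endian CRC select one of the 64 bits in the 4 x 16-bit logical
 * address filter. */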
2455 crc = ether_crc_le(6, addrs);
2456 crc = crc >> 26;
2457 mcast_table[crc >> 4] =
2458 le16_to_cpu(le16_to_cpu(mcast_table[crc >> 4]) |
2459 (1 << (crc & 0xf)));
2460 }
2461 return;
2462 }
2463
2464 /*
2465 * Set or clear the multicast filter for this adaptor.
2466 */
2467 static void pcnet32_set_multicast_list(struct net_device *dev)
2468 {
2469 unsigned long ioaddr = dev->base_addr, flags;
2470 struct pcnet32_private *lp = dev->priv;
2471
2472 spin_lock_irqsave(&lp->lock, flags);
2473 if (dev->flags & IFF_PROMISC) {
2474 /* Log any net taps. */
2475 if (netif_msg_hw(lp))
2476 printk(KERN_INFO "%s: Promiscuous mode enabled.\n",
2477 dev->name);
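/* Mode register: bit 15 (PROM) enables promiscuous reception; bits 8:7
 * carry the port selection taken from lp->options. */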
2478 lp->init_block.mode =
2479 le16_to_cpu(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
2480 7);
2481 } else {
2482 lp->init_block.mode =
2483 le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7);
2484 pcnet32_load_multicast(dev);
2485 }
2486
2487 lp->a.write_csr(ioaddr, 0, 0x0004); /* Temporarily stop the lance. */
2488 pcnet32_restart(dev, 0x0042); /* Resume normal operation */
2489 netif_wake_queue(dev);
2490
2491 spin_unlock_irqrestore(&lp->lock, flags);
2492 }
2493
2494 /* This routine assumes that the lp->lock is held */
2495 static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
2496 {
2497 struct pcnet32_private *lp = dev->priv;
2498 unsigned long ioaddr = dev->base_addr;
2499 u16 val_out;
2500
2501 if (!lp->mii)
2502 return 0;
2503
2504 lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
2505 val_out = lp->a.read_bcr(ioaddr, 34);
2506
2507 return val_out;
2508 }
2509
2510 /* This routine assumes that the lp->lock is held */
2511 static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
2512 {
2513 struct pcnet32_private *lp = dev->priv;
2514 unsigned long ioaddr = dev->base_addr;
2515
2516 if (!lp->mii)
2517 return;
2518
2519 lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
2520 lp->a.write_bcr(ioaddr, 34, val);
2521 }
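
/*
 * Illustrative usage sketch only (not built): mdio_read()/mdio_write() must
 * be called with lp->lock held, so a hypothetical helper that samples the
 * PHY status register would look roughly like this.
 */
#if 0
static u16 pcnet32_read_bmsr_locked(struct net_device *dev)
{
	struct pcnet32_private *lp = dev->priv;
	unsigned long flags;
	u16 bmsr;

	spin_lock_irqsave(&lp->lock, flags);
	bmsr = mdio_read(dev, lp->mii_if.phy_id, MII_BMSR);
	spin_unlock_irqrestore(&lp->lock, flags);

	return bmsr;
}
#endif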
2522
2523 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2524 {
2525 struct pcnet32_private *lp = dev->priv;
2526 int rc;
2527 unsigned long flags;
2528
2529 /* SIOC[GS]MIIxxx ioctls */
2530 if (lp->mii) {
2531 spin_lock_irqsave(&lp->lock, flags);
2532 rc = generic_mii_ioctl(&lp->mii_if, if_mii(rq), cmd, NULL);
2533 spin_unlock_irqrestore(&lp->lock, flags);
2534 } else {
2535 rc = -EOPNOTSUPP;
2536 }
2537
2538 return rc;
2539 }
2540
2541 static int pcnet32_check_otherphy(struct net_device *dev)
2542 {
2543 struct pcnet32_private *lp = dev->priv;
2544 struct mii_if_info mii = lp->mii_if;
2545 u16 bmcr;
2546 int i;
2547
2548 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
2549 if (i == lp->mii_if.phy_id)
2550 continue; /* skip active phy */
2551 if (lp->phymask & (1 << i)) {
2552 mii.phy_id = i;
2553 if (mii_link_ok(&mii)) {
2554 /* found PHY with active link */
2555 if (netif_msg_link(lp))
2556 printk(KERN_INFO
2557 "%s: Using PHY number %d.\n",
2558 dev->name, i);
2559
2560 /* isolate inactive phy */
2561 bmcr =
2562 mdio_read(dev, lp->mii_if.phy_id, MII_BMCR);
2563 mdio_write(dev, lp->mii_if.phy_id, MII_BMCR,
2564 bmcr | BMCR_ISOLATE);
2565
2566 /* de-isolate new phy */
2567 bmcr = mdio_read(dev, i, MII_BMCR);
2568 mdio_write(dev, i, MII_BMCR,
2569 bmcr & ~BMCR_ISOLATE);
2570
2571 /* set new phy address */
2572 lp->mii_if.phy_id = i;
2573 return 1;
2574 }
2575 }
2576 }
2577 return 0;
2578 }
2579
2580 /*
2581 * Show the status of the media. Similar to mii_check_media, but it
2582 * correctly shows the link speed for all (tested) pcnet32 variants.
2583 * Devices with no mii just report link state without speed.
2584 *
2585 * Caller is assumed to hold and release the lp->lock.
2586 */
2587
2588 static void pcnet32_check_media(struct net_device *dev, int verbose)
2589 {
2590 struct pcnet32_private *lp = dev->priv;
2591 int curr_link;
2592 int prev_link = netif_carrier_ok(dev) ? 1 : 0;
2593 u32 bcr9;
2594
2595 if (lp->mii) {
2596 curr_link = mii_link_ok(&lp->mii_if);
2597 } else {
2598 ulong ioaddr = dev->base_addr; /* card base I/O address */
2599 curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
2600 }
2601 if (!curr_link) {
2602 if (prev_link || verbose) {
2603 netif_carrier_off(dev);
2604 if (netif_msg_link(lp))
2605 printk(KERN_INFO "%s: link down\n", dev->name);
2606 }
2607 if (lp->phycount > 1) {
2608 curr_link = pcnet32_check_otherphy(dev);
2609 prev_link = 0;
2610 }
2611 } else if (verbose || !prev_link) {
2612 netif_carrier_on(dev);
2613 if (lp->mii) {
2614 if (netif_msg_link(lp)) {
2615 struct ethtool_cmd ecmd;
2616 mii_ethtool_gset(&lp->mii_if, &ecmd);
2617 printk(KERN_INFO
2618 "%s: link up, %sMbps, %s-duplex\n",
2619 dev->name,
2620 (ecmd.speed == SPEED_100) ? "100" : "10",
2621 (ecmd.duplex ==
2622 DUPLEX_FULL) ? "full" : "half");
2623 }
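/* BCR9 bit 0 (FDEN) enables full duplex; keep it in sync with what the
 * MII reports. */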
2624 bcr9 = lp->a.read_bcr(dev->base_addr, 9);
2625 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
2626 if (lp->mii_if.full_duplex)
2627 bcr9 |= (1 << 0);
2628 else
2629 bcr9 &= ~(1 << 0);
2630 lp->a.write_bcr(dev->base_addr, 9, bcr9);
2631 }
2632 } else {
2633 if (netif_msg_link(lp))
2634 printk(KERN_INFO "%s: link up\n", dev->name);
2635 }
2636 }
2637 }
2638
2639 /*
2640 * Check for loss of link and link establishment.
2641 * Cannot use mii_check_media because it does nothing if the mode is forced.
2642 */
2643
2644 static void pcnet32_watchdog(struct net_device *dev)
2645 {
2646 struct pcnet32_private *lp = dev->priv;
2647 unsigned long flags;
2648
2649 /* Print the link status if it has changed */
2650 spin_lock_irqsave(&lp->lock, flags);
2651 pcnet32_check_media(dev, 0);
2652 spin_unlock_irqrestore(&lp->lock, flags);
2653
2654 mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT);
2655 }
2656
2657 static void __devexit pcnet32_remove_one(struct pci_dev *pdev)
2658 {
2659 struct net_device *dev = pci_get_drvdata(pdev);
2660
2661 if (dev) {
2662 struct pcnet32_private *lp = dev->priv;
2663
2664 unregister_netdev(dev);
2665 pcnet32_free_ring(dev);
2666 release_region(dev->base_addr, PCNET32_TOTAL_SIZE);
2667 pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
2668 free_netdev(dev);
2669 pci_disable_device(pdev);
2670 pci_set_drvdata(pdev, NULL);
2671 }
2672 }
2673
2674 static struct pci_driver pcnet32_driver = {
2675 .name = DRV_NAME,
2676 .probe = pcnet32_probe_pci,
2677 .remove = __devexit_p(pcnet32_remove_one),
2678 .id_table = pcnet32_pci_tbl,
2679 };
2680
2681 /* An additional parameter that may be passed in... */
2682 static int debug = -1;
2683 static int tx_start_pt = -1;
2684 static int pcnet32_have_pci;
2685
2686 module_param(debug, int, 0);
2687 MODULE_PARM_DESC(debug, DRV_NAME " debug level");
2688 module_param(max_interrupt_work, int, 0);
2689 MODULE_PARM_DESC(max_interrupt_work,
2690 DRV_NAME " maximum events handled per interrupt");
2691 module_param(rx_copybreak, int, 0);
2692 MODULE_PARM_DESC(rx_copybreak,
2693 DRV_NAME " copy breakpoint for copy-only-tiny-frames");
2694 module_param(tx_start_pt, int, 0);
2695 MODULE_PARM_DESC(tx_start_pt, DRV_NAME " transmit start point (0-3)");
2696 module_param(pcnet32vlb, int, 0);
2697 MODULE_PARM_DESC(pcnet32vlb, DRV_NAME " Vesa local bus (VLB) support (0/1)");
2698 module_param_array(options, int, NULL, 0);
2699 MODULE_PARM_DESC(options, DRV_NAME " initial option setting(s) (0-15)");
2700 module_param_array(full_duplex, int, NULL, 0);
2701 MODULE_PARM_DESC(full_duplex, DRV_NAME " full duplex setting(s) (1)");
2702 /* Module Parameter for HomePNA cards added by Patrick Simmons, 2004 */
2703 module_param_array(homepna, int, NULL, 0);
2704 MODULE_PARM_DESC(homepna,
2705 DRV_NAME
2706 " mode for 79C978 cards (1 for HomePNA, 0 for Ethernet, default Ethernet");
2707
2708 MODULE_AUTHOR("Thomas Bogendoerfer");
2709 MODULE_DESCRIPTION("Driver for PCnet32 and PCnetPCI based ethercards");
2710 MODULE_LICENSE("GPL");
2711
2712 #define PCNET32_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
2713
2714 static int __init pcnet32_init_module(void)
2715 {
2716 printk(KERN_INFO "%s", version);
2717
2718 pcnet32_debug = netif_msg_init(debug, PCNET32_MSG_DEFAULT);
2719
2720 if ((tx_start_pt >= 0) && (tx_start_pt <= 3))
2721 tx_start = tx_start_pt;
2722
2723 /* find the PCI devices */
2724 if (!pci_module_init(&pcnet32_driver))
2725 pcnet32_have_pci = 1;
2726
2727 /* should we find any remaining VLbus devices ? */
2728 if (pcnet32vlb)
2729 pcnet32_probe_vlbus(pcnet32_portlist);
2730
2731 if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE))
2732 printk(KERN_INFO PFX "%d cards found.\n", cards_found);
2733
2734 return (pcnet32_have_pci + cards_found) ? 0 : -ENODEV;
2735 }
2736
2737 static void __exit pcnet32_cleanup_module(void)
2738 {
2739 struct net_device *next_dev;
2740
2741 while (pcnet32_dev) {
2742 struct pcnet32_private *lp = pcnet32_dev->priv;
2743 next_dev = lp->next;
2744 unregister_netdev(pcnet32_dev);
2745 pcnet32_free_ring(pcnet32_dev);
2746 release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE);
2747 pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
2748 free_netdev(pcnet32_dev);
2749 pcnet32_dev = next_dev;
2750 }
2751
2752 if (pcnet32_have_pci)
2753 pci_unregister_driver(&pcnet32_driver);
2754 }
2755
2756 module_init(pcnet32_init_module);
2757 module_exit(pcnet32_cleanup_module);
2758
2759 /*
2760 * Local variables:
2761 * c-indent-level: 4
2762 * tab-width: 8
2763 * End:
2764 */