1da177e4
LT
1/* pcnet32.c: An AMD PCnet32 ethernet driver for linux. */
2/*
3 * Copyright 1996-1999 Thomas Bogendoerfer
4 *
5 * Derived from the lance driver written 1993,1994,1995 by Donald Becker.
6 *
7 * Copyright 1993 United States Government as represented by the
8 * Director, National Security Agency.
9 *
10 * This software may be used and distributed according to the terms
11 * of the GNU General Public License, incorporated herein by reference.
12 *
13 * This driver is for PCnet32 and PCnetPCI based ethercards
14 */
15/**************************************************************************
16 * 23 Oct, 2000.
17 * Fixed a few bugs, related to running the controller in 32bit mode.
18 *
19 * Carsten Langgaard, carstenl@mips.com
20 * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
21 *
22 *************************************************************************/
23
13ff83b9
JP
24#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
25
1da177e4 26#define DRV_NAME "pcnet32"
01935d7d
DF
27#define DRV_VERSION "1.35"
28#define DRV_RELDATE "21.Apr.2008"
1da177e4
LT
29#define PFX DRV_NAME ": "
30
4a5e8e29
JG
31static const char *const version =
32 DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n";
1da177e4
LT
33
34#include <linux/module.h>
35#include <linux/kernel.h>
d43c36dc 36#include <linux/sched.h>
1da177e4
LT
37#include <linux/string.h>
38#include <linux/errno.h>
39#include <linux/ioport.h>
40#include <linux/slab.h>
41#include <linux/interrupt.h>
42#include <linux/pci.h>
43#include <linux/delay.h>
44#include <linux/init.h>
45#include <linux/ethtool.h>
46#include <linux/mii.h>
47#include <linux/crc32.h>
48#include <linux/netdevice.h>
49#include <linux/etherdevice.h>
1f044931 50#include <linux/if_ether.h>
1da177e4
LT
51#include <linux/skbuff.h>
52#include <linux/spinlock.h>
53#include <linux/moduleparam.h>
54#include <linux/bitops.h>
9e3f8063
JP
55#include <linux/io.h>
56#include <linux/uaccess.h>
1da177e4
LT
57
58#include <asm/dma.h>
1da177e4
LT
59#include <asm/irq.h>
60
61/*
62 * PCI device identifiers for "new style" Linux PCI Device Drivers
63 */
9baa3c34 64static const struct pci_device_id pcnet32_pci_tbl[] = {
f2622a2b
DF
65 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME), },
66 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE), },
4a5e8e29
JG
67
68 /*
69 * Adapters that were sold with IBM's RS/6000 or pSeries hardware have
70 * the incorrect vendor id.
71 */
f2622a2b
DF
72 { PCI_DEVICE(PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE),
73 .class = (PCI_CLASS_NETWORK_ETHERNET << 8), .class_mask = 0xffff00, },
4a5e8e29
JG
74
75 { } /* terminate list */
1da177e4
LT
76};
77
4a5e8e29 78MODULE_DEVICE_TABLE(pci, pcnet32_pci_tbl);
1da177e4
LT
79
80static int cards_found;
81
82/*
83 * VLB I/O addresses
84 */
aa02bc70 85static unsigned int pcnet32_portlist[] =
4a5e8e29 86 { 0x300, 0x320, 0x340, 0x360, 0 };
1da177e4 87
9e3f8063 88static int pcnet32_debug;
4a5e8e29
JG
89static int tx_start = 1; /* Mapping -- 0:20, 1:64, 2:128, 3:~220 (depends on chip vers) */
90static int pcnet32vlb; /* check for VLB cards ? */
1da177e4
LT
91
92static struct net_device *pcnet32_dev;
93
94static int max_interrupt_work = 2;
95static int rx_copybreak = 200;
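/*
 * Editor's note (describing pcnet32_rx_entry() below, not new code):
 * received frames longer than rx_copybreak are handed up in their ring
 * buffer and the ring slot is refilled with a fresh skb, while frames of
 * rx_copybreak bytes or less are copied into a small freshly allocated
 * skb so that the large ring buffer can be reused immediately.
 */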
96
97#define PCNET32_PORT_AUI 0x00
98#define PCNET32_PORT_10BT 0x01
99#define PCNET32_PORT_GPSI 0x02
100#define PCNET32_PORT_MII 0x03
101
102#define PCNET32_PORT_PORTSEL 0x03
103#define PCNET32_PORT_ASEL 0x04
104#define PCNET32_PORT_100 0x40
105#define PCNET32_PORT_FD 0x80
106
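/*
 * Editor's worked value: combining the port bits above, for example
 * PCNET32_PORT_MII | PCNET32_PORT_100 | PCNET32_PORT_FD == 0xc3
 * describes full-duplex 100BaseTx over the MII, the value that
 * options_mapping[] below uses for tulip option 14.
 */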
107#define PCNET32_DMA_MASK 0xffffffff
108
109#define PCNET32_WATCHDOG_TIMEOUT (jiffies + (2 * HZ))
110#define PCNET32_BLINK_TIMEOUT (jiffies + (HZ/4))
111
112/*
113 * table to translate option values from tulip
114 * to internal options
115 */
f71e1309 116static const unsigned char options_mapping[] = {
4a5e8e29
JG
117 PCNET32_PORT_ASEL, /* 0 Auto-select */
118 PCNET32_PORT_AUI, /* 1 BNC/AUI */
119 PCNET32_PORT_AUI, /* 2 AUI/BNC */
120 PCNET32_PORT_ASEL, /* 3 not supported */
121 PCNET32_PORT_10BT | PCNET32_PORT_FD, /* 4 10baseT-FD */
122 PCNET32_PORT_ASEL, /* 5 not supported */
123 PCNET32_PORT_ASEL, /* 6 not supported */
124 PCNET32_PORT_ASEL, /* 7 not supported */
125 PCNET32_PORT_ASEL, /* 8 not supported */
126 PCNET32_PORT_MII, /* 9 MII 10baseT */
127 PCNET32_PORT_MII | PCNET32_PORT_FD, /* 10 MII 10baseT-FD */
128 PCNET32_PORT_MII, /* 11 MII (autosel) */
129 PCNET32_PORT_10BT, /* 12 10BaseT */
130 PCNET32_PORT_MII | PCNET32_PORT_100, /* 13 MII 100BaseTx */
131 /* 14 MII 100BaseTx-FD */
132 PCNET32_PORT_MII | PCNET32_PORT_100 | PCNET32_PORT_FD,
133 PCNET32_PORT_ASEL /* 15 not supported */
1da177e4
LT
134};
135
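/*
 * Editor's usage sketch (based on how the probe code maps these tulip-style
 * option values; the module-load line is illustrative only):
 *
 *	modprobe pcnet32 options=4
 *
 * would pick options_mapping[4], i.e. 10baseT full duplex
 * (PCNET32_PORT_10BT | PCNET32_PORT_FD), for the first card.
 */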
136static const char pcnet32_gstrings_test[][ETH_GSTRING_LEN] = {
4a5e8e29 137 "Loopback test (offline)"
1da177e4 138};
4a5e8e29 139
4c3616cd 140#define PCNET32_TEST_LEN ARRAY_SIZE(pcnet32_gstrings_test)
1da177e4 141
ac62ef04 142#define PCNET32_NUM_REGS 136
1da177e4 143
4a5e8e29 144#define MAX_UNITS 8 /* More are supported, limit only on options */
1da177e4
LT
145static int options[MAX_UNITS];
146static int full_duplex[MAX_UNITS];
147static int homepna[MAX_UNITS];
148
149/*
150 * Theory of Operation
151 *
152 * This driver uses the same software structure as the normal lance
153 * driver. So look for a verbose description in lance.c. The difference
154 * from the normal lance driver is the use of the 32bit mode of PCnet32
155 * and PCnetPCI chips. Because these chips are 32bit chips, there is no
156 * 16MB limitation and we don't need bounce buffers.
157 */
158
1da177e4
LT
159/*
160 * Set the number of Tx and Rx buffers, using Log_2(# buffers).
161 * The defaults below are 16 Tx buffers and 32 Rx buffers,
162 * which translates to 4 (16 == 2^^4) and 5 (32 == 2^^5).
163 */
164#ifndef PCNET32_LOG_TX_BUFFERS
eabf0415
HWL
165#define PCNET32_LOG_TX_BUFFERS 4
166#define PCNET32_LOG_RX_BUFFERS 5
167#define PCNET32_LOG_MAX_TX_BUFFERS 9 /* 2^9 == 512 */
168#define PCNET32_LOG_MAX_RX_BUFFERS 9
1da177e4
LT
169#endif
170
171#define TX_RING_SIZE (1 << (PCNET32_LOG_TX_BUFFERS))
eabf0415 172#define TX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_TX_BUFFERS))
1da177e4
LT
173
174#define RX_RING_SIZE (1 << (PCNET32_LOG_RX_BUFFERS))
eabf0415 175#define RX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_RX_BUFFERS))
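/*
 * Worked numbers (editor's note): with the defaults above this gives
 * TX_RING_SIZE == 1 << 4 == 16 and RX_RING_SIZE == 1 << 5 == 32
 * descriptors, while the reallocation paths below may grow either ring
 * up to TX_MAX_RING_SIZE == RX_MAX_RING_SIZE == 1 << 9 == 512.
 */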
1da177e4 176
232c5640
DF
177#define PKT_BUF_SKB 1544
178/* actual buffer length after being aligned */
179#define PKT_BUF_SIZE (PKT_BUF_SKB - NET_IP_ALIGN)
180/* chip wants twos complement of the (aligned) buffer length */
181#define NEG_BUF_SIZE (NET_IP_ALIGN - PKT_BUF_SKB)
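/*
 * Worked example (editor's note, assuming NET_IP_ALIGN == 2 as on most
 * architectures): PKT_BUF_SIZE == 1544 - 2 == 1542 usable bytes, and
 * NEG_BUF_SIZE == 2 - 1544 == -1542, the two's complement value written
 * into the descriptor buf_length fields below.
 */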
1da177e4
LT
182
183/* Offsets from base I/O address. */
184#define PCNET32_WIO_RDP 0x10
185#define PCNET32_WIO_RAP 0x12
186#define PCNET32_WIO_RESET 0x14
187#define PCNET32_WIO_BDP 0x16
188
189#define PCNET32_DWIO_RDP 0x10
190#define PCNET32_DWIO_RAP 0x14
191#define PCNET32_DWIO_RESET 0x18
192#define PCNET32_DWIO_BDP 0x1C
193
194#define PCNET32_TOTAL_SIZE 0x20
195
06c87850
DF
196#define CSR0 0
197#define CSR0_INIT 0x1
198#define CSR0_START 0x2
199#define CSR0_STOP 0x4
200#define CSR0_TXPOLL 0x8
201#define CSR0_INTEN 0x40
202#define CSR0_IDON 0x0100
203#define CSR0_NORMAL (CSR0_START | CSR0_INTEN)
204#define PCNET32_INIT_LOW 1
205#define PCNET32_INIT_HIGH 2
206#define CSR3 3
207#define CSR4 4
208#define CSR5 5
209#define CSR5_SUSPEND 0x0001
210#define CSR15 15
211#define PCNET32_MC_FILTER 8
212
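/*
 * Editor's note, a small worked value (not new driver code):
 * CSR0_NORMAL == CSR0_START | CSR0_INTEN == 0x2 | 0x40 == 0x42, so a
 * single write both restarts the chip and re-enables its interrupt line.
 */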
8d916266
DF
213#define PCNET32_79C970A 0x2621
214
1da177e4
LT
215/* The PCNET32 Rx and Tx ring descriptors. */
216struct pcnet32_rx_head {
3e33545b
AV
217 __le32 base;
218 __le16 buf_length; /* two's complement of length */
219 __le16 status;
220 __le32 msg_length;
221 __le32 reserved;
1da177e4
LT
222};
223
224struct pcnet32_tx_head {
3e33545b
AV
225 __le32 base;
226 __le16 length; /* two's complement of length */
227 __le16 status;
228 __le32 misc;
229 __le32 reserved;
1da177e4
LT
230};
231
232/* The PCNET32 32-Bit initialization block, described in databook. */
233struct pcnet32_init_block {
3e33545b
AV
234 __le16 mode;
235 __le16 tlen_rlen;
0b5bf225 236 u8 phys_addr[6];
3e33545b
AV
237 __le16 reserved;
238 __le32 filter[2];
4a5e8e29 239 /* Receive and transmit ring base, along with extra bits. */
3e33545b
AV
240 __le32 rx_ring;
241 __le32 tx_ring;
1da177e4
LT
242};
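/*
 * Editor's sketch (derived from the ring reallocation code below, not a
 * new definition): tlen_rlen packs the log2 ring sizes with TLEN in bits
 * 15-12 and RLEN in bits 7-4, so the default rings (16 Tx, 32 Rx
 * descriptors) would be encoded as (4 << 12) | (5 << 4) == 0x4050.
 */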
243
244/* PCnet32 access functions */
245struct pcnet32_access {
4a5e8e29
JG
246 u16 (*read_csr) (unsigned long, int);
247 void (*write_csr) (unsigned long, int, u16);
248 u16 (*read_bcr) (unsigned long, int);
249 void (*write_bcr) (unsigned long, int, u16);
250 u16 (*read_rap) (unsigned long);
251 void (*write_rap) (unsigned long, u16);
252 void (*reset) (unsigned long);
1da177e4
LT
253};
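/*
 * Editor's usage sketch (not new driver code): all register access goes
 * through this ops table, e.g.
 *
 *	u16 csr0 = lp->a->read_csr(ioaddr, CSR0);
 *
 * which, for the 16-bit (WIO) variant below, writes the register index
 * to RAP and then reads the data port RDP.
 */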
254
255/*
76209926
HWL
256 * The first field of pcnet32_private is read by the ethernet device
257 * so the structure should be allocated using pci_alloc_consistent().
1da177e4
LT
258 */
259struct pcnet32_private {
6ecb7667 260 struct pcnet32_init_block *init_block;
4a5e8e29 261 /* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */
0b5bf225
JG
262 struct pcnet32_rx_head *rx_ring;
263 struct pcnet32_tx_head *tx_ring;
6ecb7667
DF
264 dma_addr_t init_dma_addr;/* DMA address of beginning of the init block,
265 returned by pci_alloc_consistent */
0b5bf225
JG
266 struct pci_dev *pci_dev;
267 const char *name;
4a5e8e29 268 /* The saved address of a sent-in-place packet/buffer, for skfree(). */
0b5bf225
JG
269 struct sk_buff **tx_skbuff;
270 struct sk_buff **rx_skbuff;
271 dma_addr_t *tx_dma_addr;
272 dma_addr_t *rx_dma_addr;
1d70cb06 273 const struct pcnet32_access *a;
0b5bf225
JG
274 spinlock_t lock; /* Guard lock */
275 unsigned int cur_rx, cur_tx; /* The next free ring entry */
276 unsigned int rx_ring_size; /* current rx ring size */
277 unsigned int tx_ring_size; /* current tx ring size */
278 unsigned int rx_mod_mask; /* rx ring modular mask */
279 unsigned int tx_mod_mask; /* tx ring modular mask */
280 unsigned short rx_len_bits;
281 unsigned short tx_len_bits;
282 dma_addr_t rx_ring_dma_addr;
283 dma_addr_t tx_ring_dma_addr;
284 unsigned int dirty_rx, /* ring entries to be freed. */
285 dirty_tx;
286
bea3348e
SH
287 struct net_device *dev;
288 struct napi_struct napi;
0b5bf225
JG
289 char tx_full;
290 char phycount; /* number of phys found */
291 int options;
292 unsigned int shared_irq:1, /* shared irq possible */
293 dxsuflo:1, /* disable transmit stop on uflo */
2be4cb97
OZ
294 mii:1, /* mii port available */
295 autoneg:1, /* autoneg enabled */
296 port_tp:1, /* port set to TP */
297 fdx:1; /* full duplex enabled */
0b5bf225
JG
298 struct net_device *next;
299 struct mii_if_info mii_if;
300 struct timer_list watchdog_timer;
0b5bf225 301 u32 msg_enable; /* debug message level */
4a5e8e29
JG
302
303 /* each bit indicates an available PHY */
0b5bf225 304 u32 phymask;
8d916266 305 unsigned short chip_version; /* which variant this is */
9871acf6 306
307 /* saved registers during ethtool blink */
308 u16 save_regs[4];
1da177e4
LT
309};
310
4a5e8e29
JG
311static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *);
312static int pcnet32_probe1(unsigned long, int, struct pci_dev *);
313static int pcnet32_open(struct net_device *);
314static int pcnet32_init_ring(struct net_device *);
61357325
SH
315static netdev_tx_t pcnet32_start_xmit(struct sk_buff *,
316 struct net_device *);
4a5e8e29 317static void pcnet32_tx_timeout(struct net_device *dev);
7d12e780 318static irqreturn_t pcnet32_interrupt(int, void *);
4a5e8e29 319static int pcnet32_close(struct net_device *);
1da177e4
LT
320static struct net_device_stats *pcnet32_get_stats(struct net_device *);
321static void pcnet32_load_multicast(struct net_device *dev);
322static void pcnet32_set_multicast_list(struct net_device *);
4a5e8e29 323static int pcnet32_ioctl(struct net_device *, struct ifreq *, int);
1da177e4
LT
324static void pcnet32_watchdog(struct net_device *);
325static int mdio_read(struct net_device *dev, int phy_id, int reg_num);
4a5e8e29
JG
326static void mdio_write(struct net_device *dev, int phy_id, int reg_num,
327 int val);
1da177e4
LT
328static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits);
329static void pcnet32_ethtool_test(struct net_device *dev,
4a5e8e29
JG
330 struct ethtool_test *eth_test, u64 * data);
331static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1);
1da177e4
LT
332static int pcnet32_get_regs_len(struct net_device *dev);
333static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
4a5e8e29 334 void *ptr);
1bcd3153 335static void pcnet32_purge_tx_ring(struct net_device *dev);
b166cfba 336static int pcnet32_alloc_ring(struct net_device *dev, const char *name);
eabf0415 337static void pcnet32_free_ring(struct net_device *dev);
ac62ef04 338static void pcnet32_check_media(struct net_device *dev, int verbose);
eabf0415 339
4a5e8e29 340static u16 pcnet32_wio_read_csr(unsigned long addr, int index)
1da177e4 341{
4a5e8e29
JG
342 outw(index, addr + PCNET32_WIO_RAP);
343 return inw(addr + PCNET32_WIO_RDP);
1da177e4
LT
344}
345
4a5e8e29 346static void pcnet32_wio_write_csr(unsigned long addr, int index, u16 val)
1da177e4 347{
4a5e8e29
JG
348 outw(index, addr + PCNET32_WIO_RAP);
349 outw(val, addr + PCNET32_WIO_RDP);
1da177e4
LT
350}
351
4a5e8e29 352static u16 pcnet32_wio_read_bcr(unsigned long addr, int index)
1da177e4 353{
4a5e8e29
JG
354 outw(index, addr + PCNET32_WIO_RAP);
355 return inw(addr + PCNET32_WIO_BDP);
1da177e4
LT
356}
357
4a5e8e29 358static void pcnet32_wio_write_bcr(unsigned long addr, int index, u16 val)
1da177e4 359{
4a5e8e29
JG
360 outw(index, addr + PCNET32_WIO_RAP);
361 outw(val, addr + PCNET32_WIO_BDP);
1da177e4
LT
362}
363
4a5e8e29 364static u16 pcnet32_wio_read_rap(unsigned long addr)
1da177e4 365{
4a5e8e29 366 return inw(addr + PCNET32_WIO_RAP);
1da177e4
LT
367}
368
4a5e8e29 369static void pcnet32_wio_write_rap(unsigned long addr, u16 val)
1da177e4 370{
4a5e8e29 371 outw(val, addr + PCNET32_WIO_RAP);
1da177e4
LT
372}
373
4a5e8e29 374static void pcnet32_wio_reset(unsigned long addr)
1da177e4 375{
4a5e8e29 376 inw(addr + PCNET32_WIO_RESET);
1da177e4
LT
377}
378
4a5e8e29 379static int pcnet32_wio_check(unsigned long addr)
1da177e4 380{
4a5e8e29 381 outw(88, addr + PCNET32_WIO_RAP);
807540ba 382 return inw(addr + PCNET32_WIO_RAP) == 88;
1da177e4
LT
383}
384
1d70cb06 385static const struct pcnet32_access pcnet32_wio = {
4a5e8e29
JG
386 .read_csr = pcnet32_wio_read_csr,
387 .write_csr = pcnet32_wio_write_csr,
388 .read_bcr = pcnet32_wio_read_bcr,
389 .write_bcr = pcnet32_wio_write_bcr,
390 .read_rap = pcnet32_wio_read_rap,
391 .write_rap = pcnet32_wio_write_rap,
392 .reset = pcnet32_wio_reset
1da177e4
LT
393};
394
4a5e8e29 395static u16 pcnet32_dwio_read_csr(unsigned long addr, int index)
1da177e4 396{
4a5e8e29 397 outl(index, addr + PCNET32_DWIO_RAP);
9e3f8063 398 return inl(addr + PCNET32_DWIO_RDP) & 0xffff;
1da177e4
LT
399}
400
4a5e8e29 401static void pcnet32_dwio_write_csr(unsigned long addr, int index, u16 val)
1da177e4 402{
4a5e8e29
JG
403 outl(index, addr + PCNET32_DWIO_RAP);
404 outl(val, addr + PCNET32_DWIO_RDP);
1da177e4
LT
405}
406
4a5e8e29 407static u16 pcnet32_dwio_read_bcr(unsigned long addr, int index)
1da177e4 408{
4a5e8e29 409 outl(index, addr + PCNET32_DWIO_RAP);
9e3f8063 410 return inl(addr + PCNET32_DWIO_BDP) & 0xffff;
1da177e4
LT
411}
412
4a5e8e29 413static void pcnet32_dwio_write_bcr(unsigned long addr, int index, u16 val)
1da177e4 414{
4a5e8e29
JG
415 outl(index, addr + PCNET32_DWIO_RAP);
416 outl(val, addr + PCNET32_DWIO_BDP);
1da177e4
LT
417}
418
4a5e8e29 419static u16 pcnet32_dwio_read_rap(unsigned long addr)
1da177e4 420{
9e3f8063 421 return inl(addr + PCNET32_DWIO_RAP) & 0xffff;
1da177e4
LT
422}
423
4a5e8e29 424static void pcnet32_dwio_write_rap(unsigned long addr, u16 val)
1da177e4 425{
4a5e8e29 426 outl(val, addr + PCNET32_DWIO_RAP);
1da177e4
LT
427}
428
4a5e8e29 429static void pcnet32_dwio_reset(unsigned long addr)
1da177e4 430{
4a5e8e29 431 inl(addr + PCNET32_DWIO_RESET);
1da177e4
LT
432}
433
4a5e8e29 434static int pcnet32_dwio_check(unsigned long addr)
1da177e4 435{
4a5e8e29 436 outl(88, addr + PCNET32_DWIO_RAP);
807540ba 437 return (inl(addr + PCNET32_DWIO_RAP) & 0xffff) == 88;
1da177e4
LT
438}
439
1d70cb06 440static const struct pcnet32_access pcnet32_dwio = {
4a5e8e29
JG
441 .read_csr = pcnet32_dwio_read_csr,
442 .write_csr = pcnet32_dwio_write_csr,
443 .read_bcr = pcnet32_dwio_read_bcr,
444 .write_bcr = pcnet32_dwio_write_bcr,
445 .read_rap = pcnet32_dwio_read_rap,
446 .write_rap = pcnet32_dwio_write_rap,
447 .reset = pcnet32_dwio_reset
1da177e4
LT
448};
449
06c87850
DF
450static void pcnet32_netif_stop(struct net_device *dev)
451{
bea3348e 452 struct pcnet32_private *lp = netdev_priv(dev);
01935d7d 453
860e9538 454 netif_trans_update(dev); /* prevent tx timeout */
bea3348e 455 napi_disable(&lp->napi);
06c87850
DF
456 netif_tx_disable(dev);
457}
458
459static void pcnet32_netif_start(struct net_device *dev)
460{
bea3348e 461 struct pcnet32_private *lp = netdev_priv(dev);
d1d08d12
DM
462 ulong ioaddr = dev->base_addr;
463 u16 val;
01935d7d 464
06c87850 465 netif_wake_queue(dev);
1d70cb06 466 val = lp->a->read_csr(ioaddr, CSR3);
d1d08d12 467 val &= 0x00ff;
1d70cb06 468 lp->a->write_csr(ioaddr, CSR3, val);
bea3348e 469 napi_enable(&lp->napi);
06c87850
DF
470}
471
472/*
473 * Allocate space for the new sized tx ring.
474 * Free old resources
475 * Save new resources.
476 * Any failure keeps old resources.
477 * Must be called with lp->lock held.
478 */
479static void pcnet32_realloc_tx_ring(struct net_device *dev,
480 struct pcnet32_private *lp,
481 unsigned int size)
482{
483 dma_addr_t new_ring_dma_addr;
484 dma_addr_t *new_dma_addr_list;
485 struct pcnet32_tx_head *new_tx_ring;
486 struct sk_buff **new_skb_list;
e03aec16 487 unsigned int entries = BIT(size);
06c87850
DF
488
489 pcnet32_purge_tx_ring(dev);
490
e03aec16
JP
491 new_tx_ring =
492 pci_zalloc_consistent(lp->pci_dev,
493 sizeof(struct pcnet32_tx_head) * entries,
494 &new_ring_dma_addr);
495 if (new_tx_ring == NULL)
06c87850 496 return;
06c87850 497
e03aec16 498 new_dma_addr_list = kcalloc(entries, sizeof(dma_addr_t), GFP_ATOMIC);
14f8dc49 499 if (!new_dma_addr_list)
06c87850 500 goto free_new_tx_ring;
06c87850 501
e03aec16 502 new_skb_list = kcalloc(entries, sizeof(struct sk_buff *), GFP_ATOMIC);
14f8dc49 503 if (!new_skb_list)
06c87850 504 goto free_new_lists;
06c87850
DF
505
506 kfree(lp->tx_skbuff);
507 kfree(lp->tx_dma_addr);
508 pci_free_consistent(lp->pci_dev,
e03aec16
JP
509 sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
510 lp->tx_ring, lp->tx_ring_dma_addr);
06c87850 511
e03aec16 512 lp->tx_ring_size = entries;
06c87850
DF
513 lp->tx_mod_mask = lp->tx_ring_size - 1;
514 lp->tx_len_bits = (size << 12);
515 lp->tx_ring = new_tx_ring;
516 lp->tx_ring_dma_addr = new_ring_dma_addr;
517 lp->tx_dma_addr = new_dma_addr_list;
518 lp->tx_skbuff = new_skb_list;
519 return;
520
9e3f8063 521free_new_lists:
06c87850 522 kfree(new_dma_addr_list);
9e3f8063 523free_new_tx_ring:
06c87850 524 pci_free_consistent(lp->pci_dev,
e03aec16 525 sizeof(struct pcnet32_tx_head) * entries,
06c87850
DF
526 new_tx_ring,
527 new_ring_dma_addr);
06c87850
DF
528}
529
530/*
531 * Allocate space for the new sized rx ring.
532 * Re-use old receive buffers.
533 * alloc extra buffers
534 * free unneeded buffers
536 * Save new resources.
537 * Any failure keeps old resources.
538 * Must be called with lp->lock held.
539 */
540static void pcnet32_realloc_rx_ring(struct net_device *dev,
541 struct pcnet32_private *lp,
542 unsigned int size)
543{
544 dma_addr_t new_ring_dma_addr;
545 dma_addr_t *new_dma_addr_list;
546 struct pcnet32_rx_head *new_rx_ring;
547 struct sk_buff **new_skb_list;
548 int new, overlap;
e03aec16
JP
549 unsigned int entries = BIT(size);
550
551 new_rx_ring =
552 pci_zalloc_consistent(lp->pci_dev,
553 sizeof(struct pcnet32_rx_head) * entries,
554 &new_ring_dma_addr);
555 if (new_rx_ring == NULL)
06c87850 556 return;
06c87850 557
60e2e8b3 558 new_dma_addr_list = kcalloc(entries, sizeof(dma_addr_t), GFP_ATOMIC);
14f8dc49 559 if (!new_dma_addr_list)
06c87850 560 goto free_new_rx_ring;
06c87850 561
4cc5c475 562 new_skb_list = kcalloc(entries, sizeof(struct sk_buff *), GFP_ATOMIC);
14f8dc49 563 if (!new_skb_list)
06c87850 564 goto free_new_lists;
06c87850
DF
565
566 /* first copy the current receive buffers */
60e2e8b3 567 overlap = min(entries, lp->rx_ring_size);
06c87850
DF
568 for (new = 0; new < overlap; new++) {
569 new_rx_ring[new] = lp->rx_ring[new];
570 new_dma_addr_list[new] = lp->rx_dma_addr[new];
571 new_skb_list[new] = lp->rx_skbuff[new];
572 }
573 /* now allocate any new buffers needed */
60e2e8b3 574 for (; new < entries; new++) {
06c87850 575 struct sk_buff *rx_skbuff;
1d266430 576 new_skb_list[new] = netdev_alloc_skb(dev, PKT_BUF_SKB);
9e3f8063
JP
577 rx_skbuff = new_skb_list[new];
578 if (!rx_skbuff) {
06c87850 579 /* keep the original lists and buffers */
1d266430 580 netif_err(lp, drv, dev, "%s netdev_alloc_skb failed\n",
13ff83b9 581 __func__);
06c87850
DF
582 goto free_all_new;
583 }
232c5640 584 skb_reserve(rx_skbuff, NET_IP_ALIGN);
06c87850
DF
585
586 new_dma_addr_list[new] =
587 pci_map_single(lp->pci_dev, rx_skbuff->data,
232c5640 588 PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
4cc5c475
DF
589 if (pci_dma_mapping_error(lp->pci_dev,
590 new_dma_addr_list[new])) {
591 netif_err(lp, drv, dev, "%s dma mapping failed\n",
592 __func__);
593 dev_kfree_skb(new_skb_list[new]);
594 goto free_all_new;
595 }
3e33545b 596 new_rx_ring[new].base = cpu_to_le32(new_dma_addr_list[new]);
232c5640 597 new_rx_ring[new].buf_length = cpu_to_le16(NEG_BUF_SIZE);
3e33545b 598 new_rx_ring[new].status = cpu_to_le16(0x8000);
06c87850
DF
599 }
600 /* and free any unneeded buffers */
601 for (; new < lp->rx_ring_size; new++) {
602 if (lp->rx_skbuff[new]) {
4cc5c475
DF
603 if (!pci_dma_mapping_error(lp->pci_dev,
604 lp->rx_dma_addr[new]))
605 pci_unmap_single(lp->pci_dev,
606 lp->rx_dma_addr[new],
607 PKT_BUF_SIZE,
608 PCI_DMA_FROMDEVICE);
06c87850
DF
609 dev_kfree_skb(lp->rx_skbuff[new]);
610 }
611 }
612
613 kfree(lp->rx_skbuff);
614 kfree(lp->rx_dma_addr);
615 pci_free_consistent(lp->pci_dev,
616 sizeof(struct pcnet32_rx_head) *
617 lp->rx_ring_size, lp->rx_ring,
618 lp->rx_ring_dma_addr);
619
60e2e8b3 620 lp->rx_ring_size = entries;
06c87850
DF
621 lp->rx_mod_mask = lp->rx_ring_size - 1;
622 lp->rx_len_bits = (size << 4);
623 lp->rx_ring = new_rx_ring;
624 lp->rx_ring_dma_addr = new_ring_dma_addr;
625 lp->rx_dma_addr = new_dma_addr_list;
626 lp->rx_skbuff = new_skb_list;
627 return;
628
9e3f8063
JP
629free_all_new:
630 while (--new >= lp->rx_ring_size) {
06c87850 631 if (new_skb_list[new]) {
4cc5c475
DF
632 if (!pci_dma_mapping_error(lp->pci_dev,
633 new_dma_addr_list[new]))
634 pci_unmap_single(lp->pci_dev,
635 new_dma_addr_list[new],
636 PKT_BUF_SIZE,
637 PCI_DMA_FROMDEVICE);
06c87850
DF
638 dev_kfree_skb(new_skb_list[new]);
639 }
640 }
641 kfree(new_skb_list);
9e3f8063 642free_new_lists:
06c87850 643 kfree(new_dma_addr_list);
9e3f8063 644free_new_rx_ring:
06c87850 645 pci_free_consistent(lp->pci_dev,
60e2e8b3 646 sizeof(struct pcnet32_rx_head) * entries,
06c87850
DF
647 new_rx_ring,
648 new_ring_dma_addr);
06c87850
DF
649}
650
ac5bfe40
DF
651static void pcnet32_purge_rx_ring(struct net_device *dev)
652{
1e56a4b4 653 struct pcnet32_private *lp = netdev_priv(dev);
ac5bfe40
DF
654 int i;
655
656 /* free all allocated skbuffs */
657 for (i = 0; i < lp->rx_ring_size; i++) {
658 lp->rx_ring[i].status = 0; /* CPU owns buffer */
659 wmb(); /* Make sure adapter sees owner change */
660 if (lp->rx_skbuff[i]) {
4cc5c475
DF
661 if (!pci_dma_mapping_error(lp->pci_dev,
662 lp->rx_dma_addr[i]))
663 pci_unmap_single(lp->pci_dev,
664 lp->rx_dma_addr[i],
665 PKT_BUF_SIZE,
666 PCI_DMA_FROMDEVICE);
ac5bfe40
DF
667 dev_kfree_skb_any(lp->rx_skbuff[i]);
668 }
669 lp->rx_skbuff[i] = NULL;
670 lp->rx_dma_addr[i] = 0;
671 }
672}
673
1da177e4
LT
674#ifdef CONFIG_NET_POLL_CONTROLLER
675static void pcnet32_poll_controller(struct net_device *dev)
676{
4a5e8e29 677 disable_irq(dev->irq);
7d12e780 678 pcnet32_interrupt(0, dev);
4a5e8e29 679 enable_irq(dev->irq);
1da177e4
LT
680}
681#endif
682
2be4cb97
OZ
683/*
684 * lp->lock must be held.
685 */
686static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
687 int can_sleep)
688{
689 int csr5;
690 struct pcnet32_private *lp = netdev_priv(dev);
691 const struct pcnet32_access *a = lp->a;
692 ulong ioaddr = dev->base_addr;
693 int ticks;
694
695 /* really old chips have to be stopped. */
696 if (lp->chip_version < PCNET32_79C970A)
697 return 0;
698
699 /* set SUSPEND (SPND) - CSR5 bit 0 */
700 csr5 = a->read_csr(ioaddr, CSR5);
701 a->write_csr(ioaddr, CSR5, csr5 | CSR5_SUSPEND);
702
703 /* poll waiting for bit to be set */
704 ticks = 0;
705 while (!(a->read_csr(ioaddr, CSR5) & CSR5_SUSPEND)) {
706 spin_unlock_irqrestore(&lp->lock, *flags);
707 if (can_sleep)
708 msleep(1);
709 else
710 mdelay(1);
711 spin_lock_irqsave(&lp->lock, *flags);
712 ticks++;
713 if (ticks > 200) {
714 netif_printk(lp, hw, KERN_DEBUG, dev,
715 "Error getting into suspend!\n");
716 return 0;
717 }
718 }
719 return 1;
720}
721
722static void pcnet32_clr_suspend(struct pcnet32_private *lp, ulong ioaddr)
723{
724 int csr5 = lp->a->read_csr(ioaddr, CSR5);
725 /* clear SUSPEND (SPND) - CSR5 bit 0 */
726 lp->a->write_csr(ioaddr, CSR5, csr5 & ~CSR5_SUSPEND);
727}
728
ea74df81
PR
729static int pcnet32_get_link_ksettings(struct net_device *dev,
730 struct ethtool_link_ksettings *cmd)
1da177e4 731{
1e56a4b4 732 struct pcnet32_private *lp = netdev_priv(dev);
4a5e8e29
JG
733 unsigned long flags;
734 int r = -EOPNOTSUPP;
1da177e4 735
2be4cb97 736 spin_lock_irqsave(&lp->lock, flags);
4a5e8e29 737 if (lp->mii) {
ea74df81 738 mii_ethtool_get_link_ksettings(&lp->mii_if, cmd);
2be4cb97
OZ
739 r = 0;
740 } else if (lp->chip_version == PCNET32_79C970A) {
741 if (lp->autoneg) {
742 cmd->base.autoneg = AUTONEG_ENABLE;
743 if (lp->a->read_bcr(dev->base_addr, 4) == 0xc0)
744 cmd->base.port = PORT_AUI;
745 else
746 cmd->base.port = PORT_TP;
747 } else {
748 cmd->base.autoneg = AUTONEG_DISABLE;
749 cmd->base.port = lp->port_tp ? PORT_TP : PORT_AUI;
750 }
751 cmd->base.duplex = lp->fdx ? DUPLEX_FULL : DUPLEX_HALF;
752 cmd->base.speed = SPEED_10;
753 ethtool_convert_legacy_u32_to_link_mode(
754 cmd->link_modes.supported,
755 SUPPORTED_TP | SUPPORTED_AUI);
4a5e8e29
JG
756 r = 0;
757 }
2be4cb97 758 spin_unlock_irqrestore(&lp->lock, flags);
4a5e8e29 759 return r;
1da177e4
LT
760}
761
ea74df81
PR
762static int pcnet32_set_link_ksettings(struct net_device *dev,
763 const struct ethtool_link_ksettings *cmd)
1da177e4 764{
1e56a4b4 765 struct pcnet32_private *lp = netdev_priv(dev);
2be4cb97 766 ulong ioaddr = dev->base_addr;
4a5e8e29
JG
767 unsigned long flags;
768 int r = -EOPNOTSUPP;
2be4cb97 769 int suspended, bcr2, bcr9, csr15;
1da177e4 770
2be4cb97 771 spin_lock_irqsave(&lp->lock, flags);
4a5e8e29 772 if (lp->mii) {
ea74df81 773 r = mii_ethtool_set_link_ksettings(&lp->mii_if, cmd);
2be4cb97
OZ
774 } else if (lp->chip_version == PCNET32_79C970A) {
775 suspended = pcnet32_suspend(dev, &flags, 0);
776 if (!suspended)
777 lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
778
779 lp->autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
780 bcr2 = lp->a->read_bcr(ioaddr, 2);
781 if (cmd->base.autoneg == AUTONEG_ENABLE) {
782 lp->a->write_bcr(ioaddr, 2, bcr2 | 0x0002);
783 } else {
784 lp->a->write_bcr(ioaddr, 2, bcr2 & ~0x0002);
785
786 lp->port_tp = cmd->base.port == PORT_TP;
787 csr15 = lp->a->read_csr(ioaddr, CSR15) & ~0x0180;
788 if (cmd->base.port == PORT_TP)
789 csr15 |= 0x0080;
790 lp->a->write_csr(ioaddr, CSR15, csr15);
791 lp->init_block->mode = cpu_to_le16(csr15);
792
793 lp->fdx = cmd->base.duplex == DUPLEX_FULL;
794 bcr9 = lp->a->read_bcr(ioaddr, 9) & ~0x0003;
795 if (cmd->base.duplex == DUPLEX_FULL)
796 bcr9 |= 0x0003;
797 lp->a->write_bcr(ioaddr, 9, bcr9);
798 }
799 if (suspended)
800 pcnet32_clr_suspend(lp, ioaddr);
801 else if (netif_running(dev))
802 pcnet32_restart(dev, CSR0_NORMAL);
803 r = 0;
4a5e8e29 804 }
2be4cb97 805 spin_unlock_irqrestore(&lp->lock, flags);
4a5e8e29 806 return r;
1da177e4
LT
807}
808
4a5e8e29
JG
809static void pcnet32_get_drvinfo(struct net_device *dev,
810 struct ethtool_drvinfo *info)
1da177e4 811{
1e56a4b4 812 struct pcnet32_private *lp = netdev_priv(dev);
4a5e8e29 813
23020ab3
RJ
814 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
815 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
4a5e8e29 816 if (lp->pci_dev)
23020ab3
RJ
817 strlcpy(info->bus_info, pci_name(lp->pci_dev),
818 sizeof(info->bus_info));
4a5e8e29 819 else
23020ab3
RJ
820 snprintf(info->bus_info, sizeof(info->bus_info),
821 "VLB 0x%lx", dev->base_addr);
1da177e4
LT
822}
823
824static u32 pcnet32_get_link(struct net_device *dev)
825{
1e56a4b4 826 struct pcnet32_private *lp = netdev_priv(dev);
4a5e8e29
JG
827 unsigned long flags;
828 int r;
1da177e4 829
4a5e8e29
JG
830 spin_lock_irqsave(&lp->lock, flags);
831 if (lp->mii) {
832 r = mii_link_ok(&lp->mii_if);
2be4cb97
OZ
833 } else if (lp->chip_version == PCNET32_79C970A) {
834 ulong ioaddr = dev->base_addr; /* card base I/O address */
835 /* only read link if port is set to TP */
836 if (!lp->autoneg && lp->port_tp)
837 r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
838 else /* link always up for AUI port or port auto select */
839 r = 1;
840 } else if (lp->chip_version > PCNET32_79C970A) {
4a5e8e29 841 ulong ioaddr = dev->base_addr; /* card base I/O address */
1d70cb06 842 r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
8d916266
DF
843 } else { /* can not detect link on really old chips */
844 r = 1;
4a5e8e29
JG
845 }
846 spin_unlock_irqrestore(&lp->lock, flags);
847
848 return r;
1da177e4
LT
849}
850
851static u32 pcnet32_get_msglevel(struct net_device *dev)
852{
1e56a4b4 853 struct pcnet32_private *lp = netdev_priv(dev);
4a5e8e29 854 return lp->msg_enable;
1da177e4
LT
855}
856
857static void pcnet32_set_msglevel(struct net_device *dev, u32 value)
858{
1e56a4b4 859 struct pcnet32_private *lp = netdev_priv(dev);
4a5e8e29 860 lp->msg_enable = value;
1da177e4
LT
861}
862
863static int pcnet32_nway_reset(struct net_device *dev)
864{
1e56a4b4 865 struct pcnet32_private *lp = netdev_priv(dev);
4a5e8e29
JG
866 unsigned long flags;
867 int r = -EOPNOTSUPP;
1da177e4 868
4a5e8e29
JG
869 if (lp->mii) {
870 spin_lock_irqsave(&lp->lock, flags);
871 r = mii_nway_restart(&lp->mii_if);
872 spin_unlock_irqrestore(&lp->lock, flags);
873 }
874 return r;
1da177e4
LT
875}
876
4a5e8e29
JG
877static void pcnet32_get_ringparam(struct net_device *dev,
878 struct ethtool_ringparam *ering)
1da177e4 879{
1e56a4b4 880 struct pcnet32_private *lp = netdev_priv(dev);
1da177e4 881
6dcd60c2
DF
882 ering->tx_max_pending = TX_MAX_RING_SIZE;
883 ering->tx_pending = lp->tx_ring_size;
884 ering->rx_max_pending = RX_MAX_RING_SIZE;
885 ering->rx_pending = lp->rx_ring_size;
eabf0415
HWL
886}
887
4a5e8e29
JG
888static int pcnet32_set_ringparam(struct net_device *dev,
889 struct ethtool_ringparam *ering)
eabf0415 890{
1e56a4b4 891 struct pcnet32_private *lp = netdev_priv(dev);
4a5e8e29 892 unsigned long flags;
06c87850
DF
893 unsigned int size;
894 ulong ioaddr = dev->base_addr;
4a5e8e29
JG
895 int i;
896
897 if (ering->rx_mini_pending || ering->rx_jumbo_pending)
898 return -EINVAL;
899
900 if (netif_running(dev))
06c87850 901 pcnet32_netif_stop(dev);
4a5e8e29
JG
902
903 spin_lock_irqsave(&lp->lock, flags);
1d70cb06 904 lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
06c87850
DF
905
906 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
4a5e8e29
JG
907
908 /* set the minimum ring size to 4, to allow the loopback test to work
909 * unchanged.
910 */
911 for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) {
06c87850 912 if (size <= (1 << i))
4a5e8e29
JG
913 break;
914 }
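	/*
	 * Editor's example: a request of tx_pending == 100 leaves the loop
	 * with i == 7, so the ring is reallocated to 1 << 7 == 128
	 * descriptors (ring sizes are always powers of two).
	 */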
06c87850
DF
915 if ((1 << i) != lp->tx_ring_size)
916 pcnet32_realloc_tx_ring(dev, lp, i);
b368a3fb 917
06c87850 918 size = min(ering->rx_pending, (unsigned int)RX_MAX_RING_SIZE);
4a5e8e29 919 for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) {
06c87850 920 if (size <= (1 << i))
4a5e8e29
JG
921 break;
922 }
06c87850
DF
923 if ((1 << i) != lp->rx_ring_size)
924 pcnet32_realloc_rx_ring(dev, lp, i);
b368a3fb 925
bea3348e 926 lp->napi.weight = lp->rx_ring_size / 2;
06c87850
DF
927
928 if (netif_running(dev)) {
929 pcnet32_netif_start(dev);
930 pcnet32_restart(dev, CSR0_NORMAL);
4a5e8e29 931 }
eabf0415 932
4a5e8e29 933 spin_unlock_irqrestore(&lp->lock, flags);
eabf0415 934
13ff83b9
JP
935 netif_info(lp, drv, dev, "Ring Param Settings: RX: %d, TX: %d\n",
936 lp->rx_ring_size, lp->tx_ring_size);
eabf0415 937
4a5e8e29 938 return 0;
1da177e4
LT
939}
940
4a5e8e29 941static void pcnet32_get_strings(struct net_device *dev, u32 stringset,
9e3f8063 942 u8 *data)
1da177e4 943{
4a5e8e29 944 memcpy(data, pcnet32_gstrings_test, sizeof(pcnet32_gstrings_test));
1da177e4
LT
945}
946
b9f2c044 947static int pcnet32_get_sset_count(struct net_device *dev, int sset)
1da177e4 948{
b9f2c044
JG
949 switch (sset) {
950 case ETH_SS_TEST:
951 return PCNET32_TEST_LEN;
952 default:
953 return -EOPNOTSUPP;
954 }
1da177e4
LT
955}
956
957static void pcnet32_ethtool_test(struct net_device *dev,
4a5e8e29 958 struct ethtool_test *test, u64 * data)
1da177e4 959{
1e56a4b4 960 struct pcnet32_private *lp = netdev_priv(dev);
4a5e8e29
JG
961 int rc;
962
963 if (test->flags == ETH_TEST_FL_OFFLINE) {
964 rc = pcnet32_loopback_test(dev, data);
965 if (rc) {
13ff83b9
JP
966 netif_printk(lp, hw, KERN_DEBUG, dev,
967 "Loopback test failed\n");
4a5e8e29 968 test->flags |= ETH_TEST_FL_FAILED;
13ff83b9
JP
969 } else
970 netif_printk(lp, hw, KERN_DEBUG, dev,
971 "Loopback test passed\n");
972 } else
973 netif_printk(lp, hw, KERN_DEBUG, dev,
974 "No tests to run (specify 'Offline' on ethtool)\n");
4a5e8e29 975} /* end pcnet32_ethtool_test */
1da177e4 976
4a5e8e29 977static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
1da177e4 978{
1e56a4b4 979 struct pcnet32_private *lp = netdev_priv(dev);
1d70cb06 980 const struct pcnet32_access *a = lp->a; /* access to registers */
4a5e8e29
JG
981 ulong ioaddr = dev->base_addr; /* card base I/O address */
982 struct sk_buff *skb; /* sk buff */
983 int x, i; /* counters */
984 int numbuffs = 4; /* number of TX/RX buffers and descs */
985 u16 status = 0x8300; /* TX ring status */
3e33545b 986 __le16 teststatus; /* test of ring status */
4a5e8e29
JG
987 int rc; /* return code */
988 int size; /* size of packets */
989 unsigned char *packet; /* source packet data */
990 static const int data_len = 60; /* length of source packets */
991 unsigned long flags;
992 unsigned long ticks;
993
4a5e8e29
JG
994 rc = 1; /* default to fail */
995
996 if (netif_running(dev))
7de745e5 997 pcnet32_netif_stop(dev);
4a5e8e29
JG
998
999 spin_lock_irqsave(&lp->lock, flags);
1d70cb06 1000 lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
ac5bfe40
DF
1001
1002 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
4a5e8e29
JG
1003
1004 /* Reset the PCNET32 */
1d70cb06 1005 lp->a->reset(ioaddr);
1006 lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
4a5e8e29
JG
1007
1008 /* switch pcnet32 to 32bit mode */
1d70cb06 1009 lp->a->write_bcr(ioaddr, 20, 2);
4a5e8e29 1010
4a5e8e29
JG
1011 /* purge & init rings but don't actually restart */
1012 pcnet32_restart(dev, 0x0000);
1013
1d70cb06 1014 lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
4a5e8e29
JG
1015
1016 /* Initialize Transmit buffers. */
1017 size = data_len + 15;
1018 for (x = 0; x < numbuffs; x++) {
1d266430 1019 skb = netdev_alloc_skb(dev, size);
9e3f8063 1020 if (!skb) {
13ff83b9
JP
1021 netif_printk(lp, hw, KERN_DEBUG, dev,
1022 "Cannot allocate skb at line: %d!\n",
1023 __LINE__);
4a5e8e29 1024 goto clean_up;
4a5e8e29 1025 }
9e3f8063
JP
1026 packet = skb->data;
1027 skb_put(skb, size); /* create space for data */
1028 lp->tx_skbuff[x] = skb;
1029 lp->tx_ring[x].length = cpu_to_le16(-skb->len);
1030 lp->tx_ring[x].misc = 0;
1031
1032 /* put DA and SA into the skb */
1033 for (i = 0; i < 6; i++)
1034 *packet++ = dev->dev_addr[i];
1035 for (i = 0; i < 6; i++)
1036 *packet++ = dev->dev_addr[i];
1037 /* type */
1038 *packet++ = 0x08;
1039 *packet++ = 0x06;
1040 /* packet number */
1041 *packet++ = x;
1042 /* fill packet with data */
1043 for (i = 0; i < data_len; i++)
1044 *packet++ = i;
1045
1046 lp->tx_dma_addr[x] =
1047 pci_map_single(lp->pci_dev, skb->data, skb->len,
1048 PCI_DMA_TODEVICE);
4cc5c475
DF
1049 if (pci_dma_mapping_error(lp->pci_dev, lp->tx_dma_addr[x])) {
1050 netif_printk(lp, hw, KERN_DEBUG, dev,
1051 "DMA mapping error at line: %d!\n",
1052 __LINE__);
1053 goto clean_up;
1054 }
9e3f8063
JP
1055 lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]);
1056 wmb(); /* Make sure owner changes after all others are visible */
1057 lp->tx_ring[x].status = cpu_to_le16(status);
1da177e4 1058 }
1da177e4 1059
ac5bfe40
DF
1060 x = a->read_bcr(ioaddr, 32); /* set internal loopback in BCR32 */
1061 a->write_bcr(ioaddr, 32, x | 0x0002);
4a5e8e29 1062
ac5bfe40
DF
1063 /* set int loopback in CSR15 */
1064 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
1d70cb06 1065 lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
4a5e8e29 1066
3e33545b 1067 teststatus = cpu_to_le16(0x8000);
1d70cb06 1068 lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
4a5e8e29
JG
1069
1070 /* Check status of descriptors */
1071 for (x = 0; x < numbuffs; x++) {
1072 ticks = 0;
1073 rmb();
1074 while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) {
1075 spin_unlock_irqrestore(&lp->lock, flags);
ac5bfe40 1076 msleep(1);
4a5e8e29
JG
1077 spin_lock_irqsave(&lp->lock, flags);
1078 rmb();
1079 ticks++;
1080 }
1081 if (ticks == 200) {
13ff83b9 1082 netif_err(lp, hw, dev, "Desc %d failed to reset!\n", x);
4a5e8e29
JG
1083 break;
1084 }
1085 }
1086
1d70cb06 1087 lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
4a5e8e29
JG
1088 wmb();
1089 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
13ff83b9 1090 netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
4a5e8e29
JG
1091
1092 for (x = 0; x < numbuffs; x++) {
13ff83b9 1093 netdev_printk(KERN_DEBUG, dev, "Packet %d: ", x);
4a5e8e29 1094 skb = lp->rx_skbuff[x];
9e3f8063 1095 for (i = 0; i < size; i++)
13ff83b9 1096 pr_cont(" %02x", *(skb->data + i));
13ff83b9 1097 pr_cont("\n");
4a5e8e29
JG
1098 }
1099 }
1da177e4 1100
4a5e8e29
JG
1101 x = 0;
1102 rc = 0;
1103 while (x < numbuffs && !rc) {
1104 skb = lp->rx_skbuff[x];
1105 packet = lp->tx_skbuff[x]->data;
1106 for (i = 0; i < size; i++) {
1107 if (*(skb->data + i) != packet[i]) {
13ff83b9
JP
1108 netif_printk(lp, hw, KERN_DEBUG, dev,
1109 "Error in compare! %2x - %02x %02x\n",
1110 i, *(skb->data + i), packet[i]);
4a5e8e29
JG
1111 rc = 1;
1112 break;
1113 }
1114 }
1115 x++;
1116 }
1da177e4 1117
9e3f8063 1118clean_up:
ac5bfe40 1119 *data1 = rc;
4a5e8e29 1120 pcnet32_purge_tx_ring(dev);
1da177e4 1121
ac5bfe40
DF
1122 x = a->read_csr(ioaddr, CSR15);
1123 a->write_csr(ioaddr, CSR15, (x & ~0x0044)); /* reset bits 6 and 2 */
1da177e4 1124
ac5bfe40
DF
1125 x = a->read_bcr(ioaddr, 32); /* reset internal loopback */
1126 a->write_bcr(ioaddr, 32, (x & ~0x0002));
4a5e8e29 1127
7de745e5
DF
1128 if (netif_running(dev)) {
1129 pcnet32_netif_start(dev);
1130 pcnet32_restart(dev, CSR0_NORMAL);
1131 } else {
1132 pcnet32_purge_rx_ring(dev);
1d70cb06 1133 lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
7de745e5
DF
1134 }
1135 spin_unlock_irqrestore(&lp->lock, flags);
4a5e8e29 1136
9e3f8063 1137 return rc;
4a5e8e29 1138} /* end pcnet32_loopback_test */
1da177e4 1139
9871acf6 1140static int pcnet32_set_phys_id(struct net_device *dev,
1141 enum ethtool_phys_id_state state)
1da177e4 1142{
1e56a4b4 1143 struct pcnet32_private *lp = netdev_priv(dev);
1d70cb06 1144 const struct pcnet32_access *a = lp->a;
4a5e8e29
JG
1145 ulong ioaddr = dev->base_addr;
1146 unsigned long flags;
1147 int i;
1148
9871acf6 1149 switch (state) {
1150 case ETHTOOL_ID_ACTIVE:
1151 /* Save the current value of the bcrs */
1152 spin_lock_irqsave(&lp->lock, flags);
1153 for (i = 4; i < 8; i++)
1154 lp->save_regs[i - 4] = a->read_bcr(ioaddr, i);
1155 spin_unlock_irqrestore(&lp->lock, flags);
fce55922 1156 return 2; /* cycle on/off twice per second */
1da177e4 1157
9871acf6 1158 case ETHTOOL_ID_ON:
1159 case ETHTOOL_ID_OFF:
1160 /* Blink the led */
1161 spin_lock_irqsave(&lp->lock, flags);
1162 for (i = 4; i < 8; i++)
1163 a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000);
1164 spin_unlock_irqrestore(&lp->lock, flags);
1165 break;
4a5e8e29 1166
9871acf6 1167 case ETHTOOL_ID_INACTIVE:
1168 /* Restore the original value of the bcrs */
1169 spin_lock_irqsave(&lp->lock, flags);
1170 for (i = 4; i < 8; i++)
1171 a->write_bcr(ioaddr, i, lp->save_regs[i - 4]);
1172 spin_unlock_irqrestore(&lp->lock, flags);
4a5e8e29 1173 }
4a5e8e29 1174 return 0;
1da177e4
LT
1175}
1176
3904c324
DF
1177/*
1178 * process one receive descriptor entry
1179 */
1180
1181static void pcnet32_rx_entry(struct net_device *dev,
1182 struct pcnet32_private *lp,
1183 struct pcnet32_rx_head *rxp,
1184 int entry)
1185{
1186 int status = (short)le16_to_cpu(rxp->status) >> 8;
1187 int rx_in_place = 0;
1188 struct sk_buff *skb;
1189 short pkt_len;
1190
1191 if (status != 0x03) { /* There was an error. */
1192 /*
1193 * There is a tricky error noted by John Murphy,
1194 * <murf@perftech.com> to Russ Nelson: Even with full-sized
1195 * buffers it's possible for a jabber packet to use two
1196 * buffers, with only the last correctly noting the error.
1197 */
1198 if (status & 0x01) /* Only count a general error at the */
4f1e5ba0 1199 dev->stats.rx_errors++; /* end of a packet. */
3904c324 1200 if (status & 0x20)
4f1e5ba0 1201 dev->stats.rx_frame_errors++;
3904c324 1202 if (status & 0x10)
4f1e5ba0 1203 dev->stats.rx_over_errors++;
3904c324 1204 if (status & 0x08)
4f1e5ba0 1205 dev->stats.rx_crc_errors++;
3904c324 1206 if (status & 0x04)
4f1e5ba0 1207 dev->stats.rx_fifo_errors++;
3904c324
DF
1208 return;
1209 }
1210
1211 pkt_len = (le32_to_cpu(rxp->msg_length) & 0xfff) - 4;
1212
1213 /* Discard oversize frames. */
232c5640 1214 if (unlikely(pkt_len > PKT_BUF_SIZE)) {
13ff83b9
JP
1215 netif_err(lp, drv, dev, "Impossible packet size %d!\n",
1216 pkt_len);
4f1e5ba0 1217 dev->stats.rx_errors++;
3904c324
DF
1218 return;
1219 }
1220 if (pkt_len < 60) {
13ff83b9 1221 netif_err(lp, rx_err, dev, "Runt packet!\n");
4f1e5ba0 1222 dev->stats.rx_errors++;
3904c324
DF
1223 return;
1224 }
1225
1226 if (pkt_len > rx_copybreak) {
1227 struct sk_buff *newskb;
4cc5c475 1228 dma_addr_t new_dma_addr;
3904c324 1229
1d266430 1230 newskb = netdev_alloc_skb(dev, PKT_BUF_SKB);
4cc5c475
DF
1231 /*
1232 * map the new buffer, if mapping fails, drop the packet and
1233 * reuse the old buffer
1234 */
9e3f8063 1235 if (newskb) {
232c5640 1236 skb_reserve(newskb, NET_IP_ALIGN);
4cc5c475
DF
1237 new_dma_addr = pci_map_single(lp->pci_dev,
1238 newskb->data,
1239 PKT_BUF_SIZE,
1240 PCI_DMA_FROMDEVICE);
1241 if (pci_dma_mapping_error(lp->pci_dev, new_dma_addr)) {
1242 netif_err(lp, rx_err, dev,
1243 "DMA mapping error.\n");
1244 dev_kfree_skb(newskb);
1245 skb = NULL;
1246 } else {
1247 skb = lp->rx_skbuff[entry];
1248 pci_unmap_single(lp->pci_dev,
1249 lp->rx_dma_addr[entry],
1250 PKT_BUF_SIZE,
1251 PCI_DMA_FROMDEVICE);
1252 skb_put(skb, pkt_len);
1253 lp->rx_skbuff[entry] = newskb;
1254 lp->rx_dma_addr[entry] = new_dma_addr;
1255 rxp->base = cpu_to_le32(new_dma_addr);
1256 rx_in_place = 1;
1257 }
3904c324
DF
1258 } else
1259 skb = NULL;
9e3f8063 1260 } else
1d266430 1261 skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
3904c324
DF
1262
1263 if (skb == NULL) {
4f1e5ba0 1264 dev->stats.rx_dropped++;
3904c324
DF
1265 return;
1266 }
3904c324 1267 if (!rx_in_place) {
232c5640 1268 skb_reserve(skb, NET_IP_ALIGN);
3904c324
DF
1269 skb_put(skb, pkt_len); /* Make room */
1270 pci_dma_sync_single_for_cpu(lp->pci_dev,
1271 lp->rx_dma_addr[entry],
b2cbbd8e 1272 pkt_len,
3904c324 1273 PCI_DMA_FROMDEVICE);
8c7b7faa 1274 skb_copy_to_linear_data(skb,
3904c324 1275 (unsigned char *)(lp->rx_skbuff[entry]->data),
8c7b7faa 1276 pkt_len);
3904c324
DF
1277 pci_dma_sync_single_for_device(lp->pci_dev,
1278 lp->rx_dma_addr[entry],
b2cbbd8e 1279 pkt_len,
3904c324
DF
1280 PCI_DMA_FROMDEVICE);
1281 }
4f1e5ba0 1282 dev->stats.rx_bytes += skb->len;
3904c324 1283 skb->protocol = eth_type_trans(skb, dev);
7de745e5 1284 netif_receive_skb(skb);
4f1e5ba0 1285 dev->stats.rx_packets++;
3904c324
DF
1286}
1287
bea3348e 1288static int pcnet32_rx(struct net_device *dev, int budget)
9691edd2 1289{
1e56a4b4 1290 struct pcnet32_private *lp = netdev_priv(dev);
9691edd2 1291 int entry = lp->cur_rx & lp->rx_mod_mask;
3904c324
DF
1292 struct pcnet32_rx_head *rxp = &lp->rx_ring[entry];
1293 int npackets = 0;
9691edd2
DF
1294
1295 /* If we own the next entry, it's a new packet. Send it up. */
bea3348e 1296 while (npackets < budget && (short)le16_to_cpu(rxp->status) >= 0) {
3904c324
DF
1297 pcnet32_rx_entry(dev, lp, rxp, entry);
1298 npackets += 1;
9691edd2 1299 /*
3904c324
DF
1300 * The docs say that the buffer length isn't touched, but Andrew
1301 * Boyd of QNX reports that some revs of the 79C965 clear it.
9691edd2 1302 */
232c5640 1303 rxp->buf_length = cpu_to_le16(NEG_BUF_SIZE);
3904c324 1304 wmb(); /* Make sure owner changes after others are visible */
3e33545b 1305 rxp->status = cpu_to_le16(0x8000);
9691edd2 1306 entry = (++lp->cur_rx) & lp->rx_mod_mask;
3904c324 1307 rxp = &lp->rx_ring[entry];
9691edd2
DF
1308 }
1309
7de745e5 1310 return npackets;
9691edd2
DF
1311}
1312
7de745e5 1313static int pcnet32_tx(struct net_device *dev)
9691edd2 1314{
1e56a4b4 1315 struct pcnet32_private *lp = netdev_priv(dev);
9691edd2
DF
1316 unsigned int dirty_tx = lp->dirty_tx;
1317 int delta;
1318 int must_restart = 0;
1319
1320 while (dirty_tx != lp->cur_tx) {
1321 int entry = dirty_tx & lp->tx_mod_mask;
1322 int status = (short)le16_to_cpu(lp->tx_ring[entry].status);
1323
1324 if (status < 0)
1325 break; /* It still hasn't been Txed */
1326
1327 lp->tx_ring[entry].base = 0;
1328
1329 if (status & 0x4000) {
3904c324 1330 /* There was a major error, log it. */
9691edd2 1331 int err_status = le32_to_cpu(lp->tx_ring[entry].misc);
4f1e5ba0 1332 dev->stats.tx_errors++;
13ff83b9
JP
1333 netif_err(lp, tx_err, dev,
1334 "Tx error status=%04x err_status=%08x\n",
1335 status, err_status);
9691edd2 1336 if (err_status & 0x04000000)
4f1e5ba0 1337 dev->stats.tx_aborted_errors++;
9691edd2 1338 if (err_status & 0x08000000)
4f1e5ba0 1339 dev->stats.tx_carrier_errors++;
9691edd2 1340 if (err_status & 0x10000000)
4f1e5ba0 1341 dev->stats.tx_window_errors++;
9691edd2
DF
1342#ifndef DO_DXSUFLO
1343 if (err_status & 0x40000000) {
4f1e5ba0 1344 dev->stats.tx_fifo_errors++;
9691edd2
DF
1345 /* Ackk! On FIFO errors the Tx unit is turned off! */
1346 /* Remove this verbosity later! */
13ff83b9 1347 netif_err(lp, tx_err, dev, "Tx FIFO error!\n");
9691edd2
DF
1348 must_restart = 1;
1349 }
1350#else
1351 if (err_status & 0x40000000) {
4f1e5ba0 1352 dev->stats.tx_fifo_errors++;
9691edd2
DF
1353 if (!lp->dxsuflo) { /* If controller doesn't recover ... */
1354 /* Ackk! On FIFO errors the Tx unit is turned off! */
1355 /* Remove this verbosity later! */
13ff83b9 1356 netif_err(lp, tx_err, dev, "Tx FIFO error!\n");
9691edd2
DF
1357 must_restart = 1;
1358 }
1359 }
1360#endif
1361 } else {
1362 if (status & 0x1800)
4f1e5ba0
DF
1363 dev->stats.collisions++;
1364 dev->stats.tx_packets++;
9691edd2
DF
1365 }
1366
1367 /* We must free the original skb */
1368 if (lp->tx_skbuff[entry]) {
1369 pci_unmap_single(lp->pci_dev,
1370 lp->tx_dma_addr[entry],
1371 lp->tx_skbuff[entry]->
1372 len, PCI_DMA_TODEVICE);
3904c324 1373 dev_kfree_skb_any(lp->tx_skbuff[entry]);
9691edd2
DF
1374 lp->tx_skbuff[entry] = NULL;
1375 lp->tx_dma_addr[entry] = 0;
1376 }
1377 dirty_tx++;
1378 }
1379
3904c324 1380 delta = (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + lp->tx_ring_size);
9691edd2 1381 if (delta > lp->tx_ring_size) {
13ff83b9
JP
1382 netif_err(lp, drv, dev, "out-of-sync dirty pointer, %d vs. %d, full=%d\n",
1383 dirty_tx, lp->cur_tx, lp->tx_full);
9691edd2
DF
1384 dirty_tx += lp->tx_ring_size;
1385 delta -= lp->tx_ring_size;
1386 }
1387
1388 if (lp->tx_full &&
1389 netif_queue_stopped(dev) &&
1390 delta < lp->tx_ring_size - 2) {
1391 /* The ring is no longer full, clear tbusy. */
1392 lp->tx_full = 0;
1393 netif_wake_queue(dev);
1394 }
1395 lp->dirty_tx = dirty_tx;
1396
1397 return must_restart;
1398}
1399
bea3348e 1400static int pcnet32_poll(struct napi_struct *napi, int budget)
7de745e5 1401{
bea3348e
SH
1402 struct pcnet32_private *lp = container_of(napi, struct pcnet32_private, napi);
1403 struct net_device *dev = lp->dev;
7de745e5
DF
1404 unsigned long ioaddr = dev->base_addr;
1405 unsigned long flags;
bea3348e 1406 int work_done;
7de745e5
DF
1407 u16 val;
1408
bea3348e 1409 work_done = pcnet32_rx(dev, budget);
7de745e5
DF
1410
1411 spin_lock_irqsave(&lp->lock, flags);
1412 if (pcnet32_tx(dev)) {
1413 /* reset the chip to clear the error condition, then restart */
1d70cb06 1414 lp->a->reset(ioaddr);
1415 lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
7de745e5
DF
1416 pcnet32_restart(dev, CSR0_START);
1417 netif_wake_queue(dev);
1418 }
7de745e5 1419
5b2ec6f2 1420 if (work_done < budget && napi_complete_done(napi, work_done)) {
bea3348e 1421 /* clear interrupt masks */
1d70cb06 1422 val = lp->a->read_csr(ioaddr, CSR3);
bea3348e 1423 val &= 0x00ff;
1d70cb06 1424 lp->a->write_csr(ioaddr, CSR3, val);
7de745e5 1425
bea3348e 1426 /* Set interrupt enable. */
1d70cb06 1427 lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
bea3348e 1428 }
5b2ec6f2
ED
1429
1430 spin_unlock_irqrestore(&lp->lock, flags);
bea3348e 1431 return work_done;
7de745e5 1432}
7de745e5 1433
ac62ef04
DF
1434#define PCNET32_REGS_PER_PHY 32
1435#define PCNET32_MAX_PHYS 32
1da177e4
LT
1436static int pcnet32_get_regs_len(struct net_device *dev)
1437{
1e56a4b4 1438 struct pcnet32_private *lp = netdev_priv(dev);
4a5e8e29 1439 int j = lp->phycount * PCNET32_REGS_PER_PHY;
ac62ef04 1440
9e3f8063 1441 return (PCNET32_NUM_REGS + j) * sizeof(u16);
1da177e4
LT
1442}
1443
1444static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
4a5e8e29 1445 void *ptr)
1da177e4 1446{
4a5e8e29
JG
1447 int i, csr0;
1448 u16 *buff = ptr;
1e56a4b4 1449 struct pcnet32_private *lp = netdev_priv(dev);
1d70cb06 1450 const struct pcnet32_access *a = lp->a;
4a5e8e29 1451 ulong ioaddr = dev->base_addr;
4a5e8e29
JG
1452 unsigned long flags;
1453
1454 spin_lock_irqsave(&lp->lock, flags);
1455
df27f4a6
DF
1456 csr0 = a->read_csr(ioaddr, CSR0);
1457 if (!(csr0 & CSR0_STOP)) /* If not stopped */
1458 pcnet32_suspend(dev, &flags, 1);
1da177e4 1459
4a5e8e29
JG
1460 /* read address PROM */
1461 for (i = 0; i < 16; i += 2)
1462 *buff++ = inw(ioaddr + i);
1463
1464 /* read control and status registers */
9e3f8063 1465 for (i = 0; i < 90; i++)
4a5e8e29 1466 *buff++ = a->read_csr(ioaddr, i);
4a5e8e29
JG
1467
1468 *buff++ = a->read_csr(ioaddr, 112);
1469 *buff++ = a->read_csr(ioaddr, 114);
1da177e4 1470
4a5e8e29 1471 /* read bus configuration registers */
9e3f8063 1472 for (i = 0; i < 30; i++)
4a5e8e29 1473 *buff++ = a->read_bcr(ioaddr, i);
9e3f8063 1474
4a5e8e29 1475 *buff++ = 0; /* skip bcr30 so as not to hang 79C976 */
9e3f8063
JP
1476
1477 for (i = 31; i < 36; i++)
4a5e8e29 1478 *buff++ = a->read_bcr(ioaddr, i);
4a5e8e29
JG
1479
1480 /* read mii phy registers */
1481 if (lp->mii) {
1482 int j;
1483 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
1484 if (lp->phymask & (1 << j)) {
1485 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
1d70cb06 1486 lp->a->write_bcr(ioaddr, 33,
4a5e8e29 1487 (j << 5) | i);
1d70cb06 1488 *buff++ = lp->a->read_bcr(ioaddr, 34);
4a5e8e29
JG
1489 }
1490 }
1491 }
1492 }
1493
cce5fbad
OZ
1494 if (!(csr0 & CSR0_STOP)) /* If not stopped */
1495 pcnet32_clr_suspend(lp, ioaddr);
4a5e8e29
JG
1496
1497 spin_unlock_irqrestore(&lp->lock, flags);
1da177e4
LT
1498}
1499
7282d491 1500static const struct ethtool_ops pcnet32_ethtool_ops = {
4a5e8e29
JG
1501 .get_drvinfo = pcnet32_get_drvinfo,
1502 .get_msglevel = pcnet32_get_msglevel,
1503 .set_msglevel = pcnet32_set_msglevel,
1504 .nway_reset = pcnet32_nway_reset,
1505 .get_link = pcnet32_get_link,
1506 .get_ringparam = pcnet32_get_ringparam,
1507 .set_ringparam = pcnet32_set_ringparam,
4a5e8e29 1508 .get_strings = pcnet32_get_strings,
4a5e8e29 1509 .self_test = pcnet32_ethtool_test,
9871acf6 1510 .set_phys_id = pcnet32_set_phys_id,
4a5e8e29
JG
1511 .get_regs_len = pcnet32_get_regs_len,
1512 .get_regs = pcnet32_get_regs,
b9f2c044 1513 .get_sset_count = pcnet32_get_sset_count,
ea74df81
PR
1514 .get_link_ksettings = pcnet32_get_link_ksettings,
1515 .set_link_ksettings = pcnet32_set_link_ksettings,
1da177e4
LT
1516};
1517
1518/* only probes for non-PCI devices, the rest are handled by
1519 * pci_register_driver via pcnet32_probe_pci */
1520
a9590879 1521static void pcnet32_probe_vlbus(unsigned int *pcnet32_portlist)
1da177e4 1522{
4a5e8e29
JG
1523 unsigned int *port, ioaddr;
1524
1525 /* search for PCnet32 VLB cards at known addresses */
1526 for (port = pcnet32_portlist; (ioaddr = *port); port++) {
1527 if (request_region
1528 (ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_vlbus")) {
1529 /* check if there is really a pcnet chip on that ioaddr */
8e95a202
JP
1530 if ((inb(ioaddr + 14) == 0x57) &&
1531 (inb(ioaddr + 15) == 0x57)) {
4a5e8e29
JG
1532 pcnet32_probe1(ioaddr, 0, NULL);
1533 } else {
1534 release_region(ioaddr, PCNET32_TOTAL_SIZE);
1535 }
1536 }
1537 }
1da177e4
LT
1538}
1539
a9590879 1540static int
1da177e4
LT
1541pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
1542{
4a5e8e29
JG
1543 unsigned long ioaddr;
1544 int err;
1545
1546 err = pci_enable_device(pdev);
1547 if (err < 0) {
1548 if (pcnet32_debug & NETIF_MSG_PROBE)
13ff83b9 1549 pr_err("failed to enable device -- err=%d\n", err);
4a5e8e29
JG
1550 return err;
1551 }
1552 pci_set_master(pdev);
1553
1554 ioaddr = pci_resource_start(pdev, 0);
1555 if (!ioaddr) {
1556 if (pcnet32_debug & NETIF_MSG_PROBE)
13ff83b9 1557 pr_err("card has no PCI IO resources, aborting\n");
4a5e8e29
JG
1558 return -ENODEV;
1559 }
1da177e4 1560
1a47de6e
CH
1561 err = pci_set_dma_mask(pdev, PCNET32_DMA_MASK);
1562 if (err) {
4a5e8e29 1563 if (pcnet32_debug & NETIF_MSG_PROBE)
13ff83b9 1564 pr_err("architecture does not support 32bit PCI busmaster DMA\n");
1a47de6e 1565 return err;
4a5e8e29 1566 }
9e3f8063 1567 if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) {
4a5e8e29 1568 if (pcnet32_debug & NETIF_MSG_PROBE)
13ff83b9 1569 pr_err("io address range already allocated\n");
4a5e8e29
JG
1570 return -EBUSY;
1571 }
1da177e4 1572
4a5e8e29 1573 err = pcnet32_probe1(ioaddr, 1, pdev);
9e3f8063 1574 if (err < 0)
4a5e8e29 1575 pci_disable_device(pdev);
9e3f8063 1576
4a5e8e29 1577 return err;
1da177e4
LT
1578}
1579
3bc124dd
SH
1580static const struct net_device_ops pcnet32_netdev_ops = {
1581 .ndo_open = pcnet32_open,
1582 .ndo_stop = pcnet32_close,
1583 .ndo_start_xmit = pcnet32_start_xmit,
1584 .ndo_tx_timeout = pcnet32_tx_timeout,
1585 .ndo_get_stats = pcnet32_get_stats,
afc4b13d 1586 .ndo_set_rx_mode = pcnet32_set_multicast_list,
3bc124dd 1587 .ndo_do_ioctl = pcnet32_ioctl,
3bc124dd
SH
1588 .ndo_set_mac_address = eth_mac_addr,
1589 .ndo_validate_addr = eth_validate_addr,
1590#ifdef CONFIG_NET_POLL_CONTROLLER
1591 .ndo_poll_controller = pcnet32_poll_controller,
1592#endif
1593};
1594
1da177e4
LT
1595/* pcnet32_probe1
1596 * Called from both pcnet32_probe_vlbus and pcnet32_probe_pci.
1597 * pdev will be NULL when called from pcnet32_probe_vlbus.
1598 */
a9590879 1599static int
1da177e4
LT
1600pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1601{
4a5e8e29 1602 struct pcnet32_private *lp;
4a5e8e29 1603 int i, media;
87f966d9 1604 int fdx, mii, fset, dxsuflo, sram;
4a5e8e29
JG
1605 int chip_version;
1606 char *chipname;
1607 struct net_device *dev;
1d70cb06 1608 const struct pcnet32_access *a = NULL;
1409a932 1609 u8 promaddr[ETH_ALEN];
4a5e8e29
JG
1610 int ret = -ENODEV;
1611
1612 /* reset the chip */
1613 pcnet32_wio_reset(ioaddr);
1614
1615 /* NOTE: 16-bit check is first, otherwise some older PCnet chips fail */
1616 if (pcnet32_wio_read_csr(ioaddr, 0) == 4 && pcnet32_wio_check(ioaddr)) {
1617 a = &pcnet32_wio;
1618 } else {
1619 pcnet32_dwio_reset(ioaddr);
8e95a202
JP
1620 if (pcnet32_dwio_read_csr(ioaddr, 0) == 4 &&
1621 pcnet32_dwio_check(ioaddr)) {
4a5e8e29 1622 a = &pcnet32_dwio;
df4e7f72
DF
1623 } else {
1624 if (pcnet32_debug & NETIF_MSG_PROBE)
13ff83b9 1625 pr_err("No access methods\n");
4a5e8e29 1626 goto err_release_region;
df4e7f72 1627 }
4a5e8e29
JG
1628 }
1629
1630 chip_version =
1631 a->read_csr(ioaddr, 88) | (a->read_csr(ioaddr, 89) << 16);
1632 if ((pcnet32_debug & NETIF_MSG_PROBE) && (pcnet32_debug & NETIF_MSG_HW))
13ff83b9 1633 pr_info(" PCnet chip version is %#x\n", chip_version);
4a5e8e29
JG
1634 if ((chip_version & 0xfff) != 0x003) {
1635 if (pcnet32_debug & NETIF_MSG_PROBE)
13ff83b9 1636 pr_info("Unsupported chip version\n");
4a5e8e29
JG
1637 goto err_release_region;
1638 }
1639
1640 /* initialize variables */
87f966d9 1641 fdx = mii = fset = dxsuflo = sram = 0;
4a5e8e29
JG
1642 chip_version = (chip_version >> 12) & 0xffff;
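	/*
	 * Example (illustrative): a combined CSR89:CSR88 value of 0x02621003
	 * passes the 0x003 check above and decodes here to 0x2621, i.e. the
	 * 79C970A handled in the switch below.
	 */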
1643
1644 switch (chip_version) {
1645 case 0x2420:
1646 chipname = "PCnet/PCI 79C970"; /* PCI */
1647 break;
1648 case 0x2430:
1649 if (shared)
1650 chipname = "PCnet/PCI 79C970"; /* 970 gives the wrong chip id back */
1651 else
1652 chipname = "PCnet/32 79C965"; /* 486/VL bus */
1653 break;
1654 case 0x2621:
1655 chipname = "PCnet/PCI II 79C970A"; /* PCI */
1656 fdx = 1;
1657 break;
1658 case 0x2623:
1659 chipname = "PCnet/FAST 79C971"; /* PCI */
1660 fdx = 1;
1661 mii = 1;
1662 fset = 1;
1663 break;
1664 case 0x2624:
1665 chipname = "PCnet/FAST+ 79C972"; /* PCI */
1666 fdx = 1;
1667 mii = 1;
1668 fset = 1;
1669 break;
1670 case 0x2625:
1671 chipname = "PCnet/FAST III 79C973"; /* PCI */
1672 fdx = 1;
1673 mii = 1;
87f966d9 1674 sram = 1;
4a5e8e29
JG
1675 break;
1676 case 0x2626:
1677 chipname = "PCnet/Home 79C978"; /* PCI */
1678 fdx = 1;
1679 /*
1680 * This is based on specs published at www.amd.com. This section
1681 * assumes that a card with a 79C978 wants to go into standard
1682 * ethernet mode. The 79C978 can also go into 1Mb HomePNA mode,
1683 * and the module option homepna=1 can select this instead.
1684 */
1685 media = a->read_bcr(ioaddr, 49);
1686 media &= ~3; /* default to 10Mb ethernet */
1687 if (cards_found < MAX_UNITS && homepna[cards_found])
1688 media |= 1; /* switch to home wiring mode */
1689 if (pcnet32_debug & NETIF_MSG_PROBE)
13ff83b9 1690 printk(KERN_DEBUG PFX "media set to %sMbit mode\n",
4a5e8e29
JG
1691 (media & 1) ? "1" : "10");
1692 a->write_bcr(ioaddr, 49, media);
1693 break;
1694 case 0x2627:
1695 chipname = "PCnet/FAST III 79C975"; /* PCI */
1696 fdx = 1;
1697 mii = 1;
87f966d9 1698 sram = 1;
4a5e8e29
JG
1699 break;
1700 case 0x2628:
1701 chipname = "PCnet/PRO 79C976";
1702 fdx = 1;
1703 mii = 1;
1704 break;
1705 default:
1706 if (pcnet32_debug & NETIF_MSG_PROBE)
13ff83b9
JP
1707 pr_info("PCnet version %#x, no PCnet32 chip\n",
1708 chip_version);
4a5e8e29
JG
1709 goto err_release_region;
1710 }
1711
1da177e4 1712 /*
4a5e8e29
JG
1713 * On selected chips turn on the BCR18:NOUFLO bit. This stops transmit
1714 * starting until the packet is loaded. Strike one for reliability, lose
25985edc 1715 * one for latency - although on PCI this isn't a big loss. Older chips
4a5e8e29
JG
1716 * have FIFOs smaller than a packet, so you can't do this.
1717 * Turn on BCR18:BurstRdEn and BCR18:BurstWrEn.
1da177e4 1718 */
4a5e8e29
JG
1719
1720 if (fset) {
1721 a->write_bcr(ioaddr, 18, (a->read_bcr(ioaddr, 18) | 0x0860));
1722 a->write_csr(ioaddr, 80,
1723 (a->read_csr(ioaddr, 80) & 0x0C00) | 0x0c00);
1724 dxsuflo = 1;
1725 }
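	/*
	 * For reference: 0x0860 sets BCR18 bits 5, 6 and 11 (BurstWrEn,
	 * BurstRdEn and NoUFlow, as labelled in the BCR18 dump further down),
	 * and setting CSR80 bits 11:10 selects the largest transmit start
	 * point (~220 bytes).
	 */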
1726
87f966d9
MC
1727 /*
1728 * The Am79C973/Am79C975 controllers come with 12K of SRAM
1729 * which we can use for the Tx/Rx buffers; most importantly,
1730 * the use of SRAM allows us to use the BCR18:NOUFLO bit to avoid
1731 * Tx FIFO underflows.
1732 */
1733 if (sram) {
1734 /*
1735 * The SRAM is being configured in two steps. First we
1736 * set the SRAM size in the BCR25:SRAM_SIZE bits. According
1737 * to the datasheet, each bit corresponds to a 512-byte
1738 * page so we can have at most 24 pages. The SRAM_SIZE
1739 * holds the value of the upper 8 bits of the 16-bit SRAM size.
1740 * The low 8-bits start at 0x00 and end at 0xff. So the
1741 * address range is from 0x0000 up to 0x17ff. Therefore,
1742 * the SRAM_SIZE is set to 0x17. The next step is to set
1743 * the BCR26:SRAM_BND midway through so the Tx and Rx
1744 * buffers can share the SRAM equally.
1745 */
1746 a->write_bcr(ioaddr, 25, 0x17);
1747 a->write_bcr(ioaddr, 26, 0xc);
1748 /* And finally enable the NOUFLO bit */
1749 a->write_bcr(ioaddr, 18, a->read_bcr(ioaddr, 18) | (1 << 11));
1750 }
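	/*
	 * That is, SRAM_SIZE = 0x17 covers addresses 0x0000-0x17ff as
	 * described above, and SRAM_BND = 0xc places the Tx/Rx boundary at
	 * 0x0c00, roughly midway through that range.
	 */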
1751
6ecb7667 1752 dev = alloc_etherdev(sizeof(*lp));
4a5e8e29 1753 if (!dev) {
4a5e8e29
JG
1754 ret = -ENOMEM;
1755 goto err_release_region;
1756 }
63097b3a
DF
1757
1758 if (pdev)
1759 SET_NETDEV_DEV(dev, &pdev->dev);
4a5e8e29 1760
1da177e4 1761 if (pcnet32_debug & NETIF_MSG_PROBE)
13ff83b9 1762 pr_info("%s at %#3lx,", chipname, ioaddr);
4a5e8e29
JG
1763
1764 /* In most chips, after a chip reset, the ethernet address is read from the
1765 * station address PROM at the base address and programmed into the
1766 * "Physical Address Registers" CSR12-14.
1767 * As a precautionary measure, we read the PROM values and complain if
bc0e1fc9
LV
1768 * they disagree with the CSRs. If they miscompare, and the PROM addr
1769 * is valid, then the PROM addr is used.
4a5e8e29
JG
1770 */
1771 for (i = 0; i < 3; i++) {
1772 unsigned int val;
1773 val = a->read_csr(ioaddr, i + 12) & 0x0ffff;
1774 /* There may be endianness issues here. */
1775 dev->dev_addr[2 * i] = val & 0x0ff;
1776 dev->dev_addr[2 * i + 1] = (val >> 8) & 0x0ff;
1777 }
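	/*
	 * Each CSR holds two address bytes, low byte first; e.g. a CSR12
	 * value of 0x2211 yields dev_addr[0] = 0x11 and dev_addr[1] = 0x22.
	 */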
1778
1779 /* read PROM address and compare with CSR address */
1409a932 1780 for (i = 0; i < ETH_ALEN; i++)
4a5e8e29
JG
1781 promaddr[i] = inb(ioaddr + i);
1782
ebff7b41 1783 if (!ether_addr_equal(promaddr, dev->dev_addr) ||
8e95a202 1784 !is_valid_ether_addr(dev->dev_addr)) {
4a5e8e29
JG
1785 if (is_valid_ether_addr(promaddr)) {
1786 if (pcnet32_debug & NETIF_MSG_PROBE) {
13ff83b9
JP
1787 pr_cont(" warning: CSR address invalid,\n");
1788 pr_info(" using instead PROM address of");
4a5e8e29 1789 }
d458cdf7 1790 memcpy(dev->dev_addr, promaddr, ETH_ALEN);
4a5e8e29
JG
1791 }
1792 }
4a5e8e29
JG
1793
1794 /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
aaeb6cdf 1795 if (!is_valid_ether_addr(dev->dev_addr))
c7bf7169 1796 eth_zero_addr(dev->dev_addr);
4a5e8e29
JG
1797
1798 if (pcnet32_debug & NETIF_MSG_PROBE) {
13ff83b9 1799 pr_cont(" %pM", dev->dev_addr);
4a5e8e29
JG
1800
1801 /* Version 0x2623 and 0x2624 */
1802 if (((chip_version + 1) & 0xfffe) == 0x2624) {
1803 i = a->read_csr(ioaddr, 80) & 0x0C00; /* Check tx_start_pt */
13ff83b9 1804 pr_info(" tx_start_pt(0x%04x):", i);
4a5e8e29
JG
1805 switch (i >> 10) {
1806 case 0:
13ff83b9 1807 pr_cont(" 20 bytes,");
4a5e8e29
JG
1808 break;
1809 case 1:
13ff83b9 1810 pr_cont(" 64 bytes,");
4a5e8e29
JG
1811 break;
1812 case 2:
13ff83b9 1813 pr_cont(" 128 bytes,");
4a5e8e29
JG
1814 break;
1815 case 3:
13ff83b9 1816 pr_cont("~220 bytes,");
4a5e8e29
JG
1817 break;
1818 }
1819 i = a->read_bcr(ioaddr, 18); /* Check Burst/Bus control */
13ff83b9 1820 pr_cont(" BCR18(%x):", i & 0xffff);
4a5e8e29 1821 if (i & (1 << 5))
13ff83b9 1822 pr_cont("BurstWrEn ");
4a5e8e29 1823 if (i & (1 << 6))
13ff83b9 1824 pr_cont("BurstRdEn ");
4a5e8e29 1825 if (i & (1 << 7))
13ff83b9 1826 pr_cont("DWordIO ");
4a5e8e29 1827 if (i & (1 << 11))
13ff83b9 1828 pr_cont("NoUFlow ");
4a5e8e29 1829 i = a->read_bcr(ioaddr, 25);
13ff83b9 1830 pr_info(" SRAMSIZE=0x%04x,", i << 8);
4a5e8e29 1831 i = a->read_bcr(ioaddr, 26);
13ff83b9 1832 pr_cont(" SRAM_BND=0x%04x,", i << 8);
4a5e8e29
JG
1833 i = a->read_bcr(ioaddr, 27);
1834 if (i & (1 << 14))
13ff83b9 1835 pr_cont("LowLatRx");
4a5e8e29
JG
1836 }
1837 }
1838
1839 dev->base_addr = ioaddr;
1e56a4b4 1840 lp = netdev_priv(dev);
4a5e8e29 1841 /* pci_alloc_consistent returns page-aligned memory, so we do not have to check the alignment */
9e3f8063
JP
1842 lp->init_block = pci_alloc_consistent(pdev, sizeof(*lp->init_block),
1843 &lp->init_dma_addr);
1844 if (!lp->init_block) {
4a5e8e29 1845 if (pcnet32_debug & NETIF_MSG_PROBE)
13ff83b9 1846 pr_err("Consistent memory allocation failed\n");
4a5e8e29
JG
1847 ret = -ENOMEM;
1848 goto err_free_netdev;
1849 }
4a5e8e29
JG
1850 lp->pci_dev = pdev;
1851
bea3348e
SH
1852 lp->dev = dev;
1853
4a5e8e29
JG
1854 spin_lock_init(&lp->lock);
1855
4a5e8e29
JG
1856 lp->name = chipname;
1857 lp->shared_irq = shared;
1858 lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */
1859 lp->rx_ring_size = RX_RING_SIZE; /* default rx ring size */
1860 lp->tx_mod_mask = lp->tx_ring_size - 1;
1861 lp->rx_mod_mask = lp->rx_ring_size - 1;
1862 lp->tx_len_bits = (PCNET32_LOG_TX_BUFFERS << 12);
1863 lp->rx_len_bits = (PCNET32_LOG_RX_BUFFERS << 4);
1864 lp->mii_if.full_duplex = fdx;
1865 lp->mii_if.phy_id_mask = 0x1f;
1866 lp->mii_if.reg_num_mask = 0x1f;
1867 lp->dxsuflo = dxsuflo;
1868 lp->mii = mii;
8d916266 1869 lp->chip_version = chip_version;
4a5e8e29 1870 lp->msg_enable = pcnet32_debug;
8e95a202
JP
1871 if ((cards_found >= MAX_UNITS) ||
1872 (options[cards_found] >= sizeof(options_mapping)))
4a5e8e29
JG
1873 lp->options = PCNET32_PORT_ASEL;
1874 else
1875 lp->options = options_mapping[options[cards_found]];
2be4cb97
OZ
1876 /* force default port to TP on 79C970A so link detection can work */
1877 if (lp->chip_version == PCNET32_79C970A)
1878 lp->options = PCNET32_PORT_10BT;
4a5e8e29
JG
1879 lp->mii_if.dev = dev;
1880 lp->mii_if.mdio_read = mdio_read;
1881 lp->mii_if.mdio_write = mdio_write;
1882
feff348f
DF
1883 /* napi.weight is used in both the napi and non-napi cases */
1884 lp->napi.weight = lp->rx_ring_size / 2;
1885
bea3348e 1886 netif_napi_add(dev, &lp->napi, pcnet32_poll, lp->rx_ring_size / 2);
bea3348e 1887
4a5e8e29
JG
1888 if (fdx && !(lp->options & PCNET32_PORT_ASEL) &&
1889 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
1890 lp->options |= PCNET32_PORT_FD;
1891
1d70cb06 1892 lp->a = a;
4a5e8e29
JG
1893
1894 /* prior to register_netdev, dev->name is not yet correct */
1895 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
1896 ret = -ENOMEM;
1897 goto err_free_ring;
1898 }
1899 /* detect special T1/E1 WAN card by checking for MAC address */
8e95a202
JP
1900 if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0 &&
1901 dev->dev_addr[2] == 0x75)
4a5e8e29 1902 lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI;
1da177e4 1903
3e33545b 1904 lp->init_block->mode = cpu_to_le16(0x0003); /* Disable Rx and Tx. */
6ecb7667 1905 lp->init_block->tlen_rlen =
3e33545b 1906 cpu_to_le16(lp->tx_len_bits | lp->rx_len_bits);
4a5e8e29 1907 for (i = 0; i < 6; i++)
6ecb7667
DF
1908 lp->init_block->phys_addr[i] = dev->dev_addr[i];
1909 lp->init_block->filter[0] = 0x00000000;
1910 lp->init_block->filter[1] = 0x00000000;
3e33545b
AV
1911 lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr);
1912 lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr);
4a5e8e29
JG
1913
1914 /* switch pcnet32 to 32bit mode */
1915 a->write_bcr(ioaddr, 20, 2);
1916
6ecb7667
DF
1917 a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
1918 a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
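	/* CSR1/CSR2 carry the low and high 16 bits of the init block's DMA address */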
4a5e8e29
JG
1919
1920 if (pdev) { /* use the IRQ provided by PCI */
1921 dev->irq = pdev->irq;
1922 if (pcnet32_debug & NETIF_MSG_PROBE)
13ff83b9 1923 pr_cont(" assigned IRQ %d\n", dev->irq);
4a5e8e29
JG
1924 } else {
1925 unsigned long irq_mask = probe_irq_on();
1926
1927 /*
1928 * To auto-IRQ we enable the initialization-done and DMA error
1929 * interrupts. For ISA boards we get a DMA error, but VLB and PCI
1930 * boards will work.
1931 */
1932 /* Trigger an initialization just for the interrupt. */
b368a3fb 1933 a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_INIT);
4a5e8e29
JG
1934 mdelay(1);
1935
1936 dev->irq = probe_irq_off(irq_mask);
1937 if (!dev->irq) {
1938 if (pcnet32_debug & NETIF_MSG_PROBE)
13ff83b9 1939 pr_cont(", failed to detect IRQ line\n");
4a5e8e29
JG
1940 ret = -ENODEV;
1941 goto err_free_ring;
1942 }
1943 if (pcnet32_debug & NETIF_MSG_PROBE)
13ff83b9 1944 pr_cont(", probed IRQ %d\n", dev->irq);
4a5e8e29 1945 }
1da177e4 1946
4a5e8e29
JG
1947 /* Set the mii phy_id so that we can query the link state */
1948 if (lp->mii) {
1949 /* lp->phycount and lp->phymask are set to 0 by memset above */
1950
1d70cb06 1951 lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
4a5e8e29
JG
1952 /* scan for PHYs */
1953 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
1954 unsigned short id1, id2;
1955
1956 id1 = mdio_read(dev, i, MII_PHYSID1);
1957 if (id1 == 0xffff)
1958 continue;
1959 id2 = mdio_read(dev, i, MII_PHYSID2);
1960 if (id2 == 0xffff)
1961 continue;
1962 if (i == 31 && ((chip_version + 1) & 0xfffe) == 0x2624)
1963 continue; /* 79C971 & 79C972 have phantom phy at id 31 */
1964 lp->phycount++;
1965 lp->phymask |= (1 << i);
1966 lp->mii_if.phy_id = i;
1967 if (pcnet32_debug & NETIF_MSG_PROBE)
13ff83b9
JP
1968 pr_info("Found PHY %04x:%04x at address %d\n",
1969 id1, id2, i);
4a5e8e29 1970 }
1d70cb06 1971 lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
9e3f8063 1972 if (lp->phycount > 1)
4a5e8e29 1973 lp->options |= PCNET32_PORT_MII;
1da177e4 1974 }
4a5e8e29
JG
1975
1976 init_timer(&lp->watchdog_timer);
1977 lp->watchdog_timer.data = (unsigned long)dev;
1978 lp->watchdog_timer.function = (void *)&pcnet32_watchdog;
1979
1980 /* The PCNET32-specific entries in the device structure. */
3bc124dd 1981 dev->netdev_ops = &pcnet32_netdev_ops;
4a5e8e29 1982 dev->ethtool_ops = &pcnet32_ethtool_ops;
4a5e8e29 1983 dev->watchdog_timeo = (5 * HZ);
1da177e4 1984
4a5e8e29
JG
1985 /* Fill in the generic fields of the device structure. */
1986 if (register_netdev(dev))
1987 goto err_free_ring;
1988
1989 if (pdev) {
1990 pci_set_drvdata(pdev, dev);
1991 } else {
1992 lp->next = pcnet32_dev;
1993 pcnet32_dev = dev;
1994 }
1995
1996 if (pcnet32_debug & NETIF_MSG_PROBE)
13ff83b9 1997 pr_info("%s: registered as %s\n", dev->name, lp->name);
4a5e8e29
JG
1998 cards_found++;
1999
2000 /* enable LED writes */
2001 a->write_bcr(ioaddr, 2, a->read_bcr(ioaddr, 2) | 0x1000);
1da177e4 2002
4a5e8e29
JG
2003 return 0;
2004
df4e7f72 2005err_free_ring:
4a5e8e29 2006 pcnet32_free_ring(dev);
7d2e3cb7 2007 pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
6ecb7667 2008 lp->init_block, lp->init_dma_addr);
df4e7f72 2009err_free_netdev:
4a5e8e29 2010 free_netdev(dev);
df4e7f72 2011err_release_region:
4a5e8e29
JG
2012 release_region(ioaddr, PCNET32_TOTAL_SIZE);
2013 return ret;
2014}
1da177e4 2015
a88c844c 2016/* if any allocation fails, caller must also call pcnet32_free_ring */
b166cfba 2017static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
eabf0415 2018{
1e56a4b4 2019 struct pcnet32_private *lp = netdev_priv(dev);
eabf0415 2020
4a5e8e29
JG
2021 lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
2022 sizeof(struct pcnet32_tx_head) *
2023 lp->tx_ring_size,
2024 &lp->tx_ring_dma_addr);
2025 if (lp->tx_ring == NULL) {
13ff83b9 2026 netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
4a5e8e29
JG
2027 return -ENOMEM;
2028 }
eabf0415 2029
4a5e8e29
JG
2030 lp->rx_ring = pci_alloc_consistent(lp->pci_dev,
2031 sizeof(struct pcnet32_rx_head) *
2032 lp->rx_ring_size,
2033 &lp->rx_ring_dma_addr);
2034 if (lp->rx_ring == NULL) {
13ff83b9 2035 netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
4a5e8e29
JG
2036 return -ENOMEM;
2037 }
eabf0415 2038
12fa30f3 2039 lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t),
4a5e8e29 2040 GFP_ATOMIC);
14f8dc49 2041 if (!lp->tx_dma_addr)
4a5e8e29 2042 return -ENOMEM;
4a5e8e29 2043
12fa30f3 2044 lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t),
4a5e8e29 2045 GFP_ATOMIC);
14f8dc49 2046 if (!lp->rx_dma_addr)
4a5e8e29 2047 return -ENOMEM;
4a5e8e29 2048
12fa30f3 2049 lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *),
4a5e8e29 2050 GFP_ATOMIC);
14f8dc49 2051 if (!lp->tx_skbuff)
4a5e8e29 2052 return -ENOMEM;
4a5e8e29 2053
12fa30f3 2054 lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *),
4a5e8e29 2055 GFP_ATOMIC);
14f8dc49 2056 if (!lp->rx_skbuff)
4a5e8e29 2057 return -ENOMEM;
4a5e8e29
JG
2058
2059 return 0;
2060}
eabf0415
HWL
2061
2062static void pcnet32_free_ring(struct net_device *dev)
2063{
1e56a4b4 2064 struct pcnet32_private *lp = netdev_priv(dev);
eabf0415 2065
4a5e8e29
JG
2066 kfree(lp->tx_skbuff);
2067 lp->tx_skbuff = NULL;
eabf0415 2068
4a5e8e29
JG
2069 kfree(lp->rx_skbuff);
2070 lp->rx_skbuff = NULL;
eabf0415 2071
4a5e8e29
JG
2072 kfree(lp->tx_dma_addr);
2073 lp->tx_dma_addr = NULL;
eabf0415 2074
4a5e8e29
JG
2075 kfree(lp->rx_dma_addr);
2076 lp->rx_dma_addr = NULL;
eabf0415 2077
4a5e8e29
JG
2078 if (lp->tx_ring) {
2079 pci_free_consistent(lp->pci_dev,
2080 sizeof(struct pcnet32_tx_head) *
2081 lp->tx_ring_size, lp->tx_ring,
2082 lp->tx_ring_dma_addr);
2083 lp->tx_ring = NULL;
2084 }
eabf0415 2085
4a5e8e29
JG
2086 if (lp->rx_ring) {
2087 pci_free_consistent(lp->pci_dev,
2088 sizeof(struct pcnet32_rx_head) *
2089 lp->rx_ring_size, lp->rx_ring,
2090 lp->rx_ring_dma_addr);
2091 lp->rx_ring = NULL;
2092 }
eabf0415
HWL
2093}
2094
4a5e8e29 2095static int pcnet32_open(struct net_device *dev)
1da177e4 2096{
1e56a4b4 2097 struct pcnet32_private *lp = netdev_priv(dev);
63097b3a 2098 struct pci_dev *pdev = lp->pci_dev;
4a5e8e29
JG
2099 unsigned long ioaddr = dev->base_addr;
2100 u16 val;
2101 int i;
2102 int rc;
2103 unsigned long flags;
2104
a0607fd3 2105 if (request_irq(dev->irq, pcnet32_interrupt,
1fb9df5d 2106 lp->shared_irq ? IRQF_SHARED : 0, dev->name,
4a5e8e29
JG
2107 (void *)dev)) {
2108 return -EAGAIN;
2109 }
2110
2111 spin_lock_irqsave(&lp->lock, flags);
2112 /* Check for a valid station address */
2113 if (!is_valid_ether_addr(dev->dev_addr)) {
2114 rc = -EINVAL;
2115 goto err_free_irq;
2116 }
2117
2118 /* Reset the PCNET32 */
1d70cb06 2119 lp->a->reset(ioaddr);
4a5e8e29
JG
2120
2121 /* switch pcnet32 to 32bit mode */
1d70cb06 2122 lp->a->write_bcr(ioaddr, 20, 2);
4a5e8e29 2123
13ff83b9
JP
2124 netif_printk(lp, ifup, KERN_DEBUG, dev,
2125 "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
2126 __func__, dev->irq, (u32) (lp->tx_ring_dma_addr),
2127 (u32) (lp->rx_ring_dma_addr),
2128 (u32) (lp->init_dma_addr));
4a5e8e29 2129
2be4cb97
OZ
2130 lp->autoneg = !!(lp->options & PCNET32_PORT_ASEL);
2131 lp->port_tp = !!(lp->options & PCNET32_PORT_10BT);
2132 lp->fdx = !!(lp->options & PCNET32_PORT_FD);
2133
4a5e8e29 2134 /* set/reset autoselect bit */
1d70cb06 2135 val = lp->a->read_bcr(ioaddr, 2) & ~2;
4a5e8e29 2136 if (lp->options & PCNET32_PORT_ASEL)
1da177e4 2137 val |= 2;
1d70cb06 2138 lp->a->write_bcr(ioaddr, 2, val);
4a5e8e29
JG
2139
2140 /* handle full duplex setting */
2141 if (lp->mii_if.full_duplex) {
1d70cb06 2142 val = lp->a->read_bcr(ioaddr, 9) & ~3;
4a5e8e29
JG
2143 if (lp->options & PCNET32_PORT_FD) {
2144 val |= 1;
2145 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
2146 val |= 2;
2147 } else if (lp->options & PCNET32_PORT_ASEL) {
2148 /* workaround for the xSeries 250; turn on for 79C975 only */
8d916266 2149 if (lp->chip_version == 0x2627)
4a5e8e29
JG
2150 val |= 3;
2151 }
1d70cb06 2152 lp->a->write_bcr(ioaddr, 9, val);
4a5e8e29
JG
2153 }
2154
2155 /* set/reset GPSI bit in test register */
1d70cb06 2156 val = lp->a->read_csr(ioaddr, 124) & ~0x10;
4a5e8e29
JG
2157 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
2158 val |= 0x10;
1d70cb06 2159 lp->a->write_csr(ioaddr, 124, val);
4a5e8e29
JG
2160
2161 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
63097b3a
DF
2162 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
2163 (pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX ||
2164 pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) {
ac62ef04 2165 if (lp->options & PCNET32_PORT_ASEL) {
4a5e8e29 2166 lp->options = PCNET32_PORT_FD | PCNET32_PORT_100;
13ff83b9
JP
2167 netif_printk(lp, link, KERN_DEBUG, dev,
2168 "Setting 100Mb-Full Duplex\n");
4a5e8e29
JG
2169 }
2170 }
2171 if (lp->phycount < 2) {
2172 /*
2173 * 24 Jun 2004: according to AMD, in order to change the PHY,
2174 * DANAS (or DISPM for 79C976) must be set; then select the speed,
2175 * duplex, and/or enable auto negotiation, and clear DANAS
2176 */
2177 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
1d70cb06 2178 lp->a->write_bcr(ioaddr, 32,
2179 lp->a->read_bcr(ioaddr, 32) | 0x0080);
4a5e8e29 2180 /* disable Auto Negotiation, set 10Mbps, HD */
1d70cb06 2181 val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
4a5e8e29
JG
2182 if (lp->options & PCNET32_PORT_FD)
2183 val |= 0x10;
2184 if (lp->options & PCNET32_PORT_100)
2185 val |= 0x08;
1d70cb06 2186 lp->a->write_bcr(ioaddr, 32, val);
4a5e8e29
JG
2187 } else {
2188 if (lp->options & PCNET32_PORT_ASEL) {
1d70cb06 2189 lp->a->write_bcr(ioaddr, 32,
2190 lp->a->read_bcr(ioaddr,
4a5e8e29
JG
2191 32) | 0x0080);
2192 /* enable auto negotiate, setup, disable fd */
1d70cb06 2193 val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
4a5e8e29 2194 val |= 0x20;
1d70cb06 2195 lp->a->write_bcr(ioaddr, 32, val);
4a5e8e29
JG
2196 }
2197 }
2198 } else {
2199 int first_phy = -1;
2200 u16 bmcr;
2201 u32 bcr9;
8ae6daca 2202 struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
4a5e8e29
JG
2203
2204 /*
2205 * There is really no good way to handle multiple PHYs
2206 * other than turning off all of the automatics
2207 */
1d70cb06 2208 val = lp->a->read_bcr(ioaddr, 2);
2209 lp->a->write_bcr(ioaddr, 2, val & ~2);
2210 val = lp->a->read_bcr(ioaddr, 32);
2211 lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
4a5e8e29
JG
2212
2213 if (!(lp->options & PCNET32_PORT_ASEL)) {
2214 /* setup ecmd */
2215 ecmd.port = PORT_MII;
2216 ecmd.transceiver = XCVR_INTERNAL;
2217 ecmd.autoneg = AUTONEG_DISABLE;
8ae6daca
DD
2218 ethtool_cmd_speed_set(&ecmd,
2219 (lp->options & PCNET32_PORT_100) ?
2220 SPEED_100 : SPEED_10);
1d70cb06 2221 bcr9 = lp->a->read_bcr(ioaddr, 9);
4a5e8e29
JG
2222
2223 if (lp->options & PCNET32_PORT_FD) {
2224 ecmd.duplex = DUPLEX_FULL;
2225 bcr9 |= (1 << 0);
2226 } else {
2227 ecmd.duplex = DUPLEX_HALF;
2228 bcr9 &= ~(1 << 0); /* half duplex: clear the full-duplex bit */
2229 }
1d70cb06 2230 lp->a->write_bcr(ioaddr, 9, bcr9);
ac62ef04 2231 }
4a5e8e29
JG
2232
2233 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
2234 if (lp->phymask & (1 << i)) {
2235 /* isolate all but the first PHY */
2236 bmcr = mdio_read(dev, i, MII_BMCR);
2237 if (first_phy == -1) {
2238 first_phy = i;
2239 mdio_write(dev, i, MII_BMCR,
2240 bmcr & ~BMCR_ISOLATE);
2241 } else {
2242 mdio_write(dev, i, MII_BMCR,
2243 bmcr | BMCR_ISOLATE);
2244 }
2245 /* use mii_ethtool_sset to setup PHY */
2246 lp->mii_if.phy_id = i;
2247 ecmd.phy_address = i;
2248 if (lp->options & PCNET32_PORT_ASEL) {
2249 mii_ethtool_gset(&lp->mii_if, &ecmd);
2250 ecmd.autoneg = AUTONEG_ENABLE;
2251 }
2252 mii_ethtool_sset(&lp->mii_if, &ecmd);
2253 }
2254 }
2255 lp->mii_if.phy_id = first_phy;
13ff83b9 2256 netif_info(lp, link, dev, "Using PHY number %d\n", first_phy);
4a5e8e29 2257 }
1da177e4
LT
2258
2259#ifdef DO_DXSUFLO
4a5e8e29 2260 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
1d70cb06 2261 val = lp->a->read_csr(ioaddr, CSR3);
4a5e8e29 2262 val |= 0x40;
1d70cb06 2263 lp->a->write_csr(ioaddr, CSR3, val);
4a5e8e29 2264 }
1da177e4
LT
2265#endif
2266
6ecb7667 2267 lp->init_block->mode =
3e33545b 2268 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
4a5e8e29
JG
2269 pcnet32_load_multicast(dev);
2270
2271 if (pcnet32_init_ring(dev)) {
2272 rc = -ENOMEM;
2273 goto err_free_ring;
2274 }
2275
bea3348e 2276 napi_enable(&lp->napi);
bea3348e 2277
4a5e8e29 2278 /* Re-initialize the PCNET32, and start it when done. */
1d70cb06 2279 lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
2280 lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
4a5e8e29 2281
1d70cb06 2282 lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
2283 lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
4a5e8e29
JG
2284
2285 netif_start_queue(dev);
2286
8d916266
DF
2287 if (lp->chip_version >= PCNET32_79C970A) {
2288 /* Print the link status and start the watchdog */
2289 pcnet32_check_media(dev, 1);
283a21d3 2290 mod_timer(&lp->watchdog_timer, PCNET32_WATCHDOG_TIMEOUT);
8d916266 2291 }
4a5e8e29
JG
2292
2293 i = 0;
2294 while (i++ < 100)
1d70cb06 2295 if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
4a5e8e29
JG
2296 break;
2297 /*
2298 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
2299 * reports that doing so triggers a bug in the '974.
2300 */
1d70cb06 2301 lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
4a5e8e29 2302
13ff83b9
JP
2303 netif_printk(lp, ifup, KERN_DEBUG, dev,
2304 "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
2305 i,
2306 (u32) (lp->init_dma_addr),
1d70cb06 2307 lp->a->read_csr(ioaddr, CSR0));
4a5e8e29
JG
2308
2309 spin_unlock_irqrestore(&lp->lock, flags);
2310
2311 return 0; /* Always succeed */
2312
9e3f8063 2313err_free_ring:
4a5e8e29 2314 /* free any allocated skbuffs */
ac5bfe40 2315 pcnet32_purge_rx_ring(dev);
4a5e8e29 2316
4a5e8e29
JG
2317 /*
2318 * Switch back to 16bit mode to avoid problems with dumb
2319 * DOS packet driver after a warm reboot
2320 */
1d70cb06 2321 lp->a->write_bcr(ioaddr, 20, 4);
4a5e8e29 2322
9e3f8063 2323err_free_irq:
4a5e8e29
JG
2324 spin_unlock_irqrestore(&lp->lock, flags);
2325 free_irq(dev->irq, dev);
2326 return rc;
1da177e4
LT
2327}
2328
2329/*
2330 * The LANCE has been halted for one reason or another (busmaster memory
2331 * arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
2332 * etc.). Modern LANCE variants always reload their ring-buffer
2333 * configuration when restarted, so we must reinitialize our ring
2334 * context before restarting. As part of this reinitialization,
2335 * find all packets still on the Tx ring and pretend that they had been
2336 * sent (in effect, drop the packets on the floor) - the higher-level
2337 * protocols will time out and retransmit. It'd be better to shuffle
2338 * these skbs to a temp list and then actually re-Tx them after
2339 * restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com
2340 */
2341
4a5e8e29 2342static void pcnet32_purge_tx_ring(struct net_device *dev)
1da177e4 2343{
1e56a4b4 2344 struct pcnet32_private *lp = netdev_priv(dev);
4a5e8e29 2345 int i;
1da177e4 2346
4a5e8e29
JG
2347 for (i = 0; i < lp->tx_ring_size; i++) {
2348 lp->tx_ring[i].status = 0; /* CPU owns buffer */
2349 wmb(); /* Make sure adapter sees owner change */
2350 if (lp->tx_skbuff[i]) {
4cc5c475
DF
2351 if (!pci_dma_mapping_error(lp->pci_dev,
2352 lp->tx_dma_addr[i]))
2353 pci_unmap_single(lp->pci_dev,
2354 lp->tx_dma_addr[i],
2355 lp->tx_skbuff[i]->len,
2356 PCI_DMA_TODEVICE);
4a5e8e29
JG
2357 dev_kfree_skb_any(lp->tx_skbuff[i]);
2358 }
2359 lp->tx_skbuff[i] = NULL;
2360 lp->tx_dma_addr[i] = 0;
2361 }
2362}
1da177e4
LT
2363
2364/* Initialize the PCNET32 Rx and Tx rings. */
4a5e8e29 2365static int pcnet32_init_ring(struct net_device *dev)
1da177e4 2366{
1e56a4b4 2367 struct pcnet32_private *lp = netdev_priv(dev);
4a5e8e29
JG
2368 int i;
2369
2370 lp->tx_full = 0;
2371 lp->cur_rx = lp->cur_tx = 0;
2372 lp->dirty_rx = lp->dirty_tx = 0;
2373
2374 for (i = 0; i < lp->rx_ring_size; i++) {
2375 struct sk_buff *rx_skbuff = lp->rx_skbuff[i];
2376 if (rx_skbuff == NULL) {
1d266430 2377 lp->rx_skbuff[i] = netdev_alloc_skb(dev, PKT_BUF_SKB);
9e3f8063
JP
2378 rx_skbuff = lp->rx_skbuff[i];
2379 if (!rx_skbuff) {
2380 /* there is not much we can do at this point */
1d266430 2381 netif_err(lp, drv, dev, "%s netdev_alloc_skb failed\n",
13ff83b9 2382 __func__);
4a5e8e29
JG
2383 return -1;
2384 }
232c5640 2385 skb_reserve(rx_skbuff, NET_IP_ALIGN);
4a5e8e29
JG
2386 }
2387
2388 rmb();
4cc5c475 2389 if (lp->rx_dma_addr[i] == 0) {
4a5e8e29
JG
2390 lp->rx_dma_addr[i] =
2391 pci_map_single(lp->pci_dev, rx_skbuff->data,
232c5640 2392 PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
4cc5c475
DF
2393 if (pci_dma_mapping_error(lp->pci_dev,
2394 lp->rx_dma_addr[i])) {
2395 /* there is not much we can do at this point */
2396 netif_err(lp, drv, dev,
2397 "%s pci dma mapping error\n",
2398 __func__);
2399 return -1;
2400 }
2401 }
3e33545b 2402 lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]);
232c5640 2403 lp->rx_ring[i].buf_length = cpu_to_le16(NEG_BUF_SIZE);
4a5e8e29 2404 wmb(); /* Make sure owner changes after all others are visible */
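		/* setting bit 15 (the OWN bit) hands the descriptor to the controller */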
3e33545b 2405 lp->rx_ring[i].status = cpu_to_le16(0x8000);
4a5e8e29
JG
2406 }
2407 /* The Tx buffer address is filled in as needed, but we do need to clear
2408 * the upper ownership bit. */
2409 for (i = 0; i < lp->tx_ring_size; i++) {
2410 lp->tx_ring[i].status = 0; /* CPU owns buffer */
2411 wmb(); /* Make sure adapter sees owner change */
2412 lp->tx_ring[i].base = 0;
2413 lp->tx_dma_addr[i] = 0;
2414 }
2415
6ecb7667 2416 lp->init_block->tlen_rlen =
3e33545b 2417 cpu_to_le16(lp->tx_len_bits | lp->rx_len_bits);
4a5e8e29 2418 for (i = 0; i < 6; i++)
6ecb7667 2419 lp->init_block->phys_addr[i] = dev->dev_addr[i];
3e33545b
AV
2420 lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr);
2421 lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr);
4a5e8e29
JG
2422 wmb(); /* Make sure all changes are visible */
2423 return 0;
1da177e4
LT
2424}
2425
2426/* the pcnet32 has been issued a stop or reset. Wait for the stop bit
2427 * then flush the pending transmit operations, re-initialize the ring,
2428 * and tell the chip to initialize.
2429 */
4a5e8e29 2430static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
1da177e4 2431{
1e56a4b4 2432 struct pcnet32_private *lp = netdev_priv(dev);
4a5e8e29
JG
2433 unsigned long ioaddr = dev->base_addr;
2434 int i;
1da177e4 2435
4a5e8e29
JG
2436 /* wait for stop */
2437 for (i = 0; i < 100; i++)
1d70cb06 2438 if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
4a5e8e29 2439 break;
1da177e4 2440
13ff83b9
JP
2441 if (i >= 100)
2442 netif_err(lp, drv, dev, "%s timed out waiting for stop\n",
2443 __func__);
1da177e4 2444
4a5e8e29
JG
2445 pcnet32_purge_tx_ring(dev);
2446 if (pcnet32_init_ring(dev))
2447 return;
1da177e4 2448
4a5e8e29 2449 /* ReInit Ring */
1d70cb06 2450 lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
4a5e8e29
JG
2451 i = 0;
2452 while (i++ < 1000)
1d70cb06 2453 if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
4a5e8e29 2454 break;
1da177e4 2455
1d70cb06 2456 lp->a->write_csr(ioaddr, CSR0, csr0_bits);
1da177e4
LT
2457}
2458
4a5e8e29 2459static void pcnet32_tx_timeout(struct net_device *dev)
1da177e4 2460{
1e56a4b4 2461 struct pcnet32_private *lp = netdev_priv(dev);
4a5e8e29
JG
2462 unsigned long ioaddr = dev->base_addr, flags;
2463
2464 spin_lock_irqsave(&lp->lock, flags);
2465 /* Transmitter timeout, serious problems. */
2466 if (pcnet32_debug & NETIF_MSG_DRV)
13ff83b9 2467 pr_err("%s: transmit timed out, status %4.4x, resetting\n",
1d70cb06 2468 dev->name, lp->a->read_csr(ioaddr, CSR0));
2469 lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
4f1e5ba0 2470 dev->stats.tx_errors++;
4a5e8e29
JG
2471 if (netif_msg_tx_err(lp)) {
2472 int i;
2473 printk(KERN_DEBUG
2474 " Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
2475 lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
2476 lp->cur_rx);
2477 for (i = 0; i < lp->rx_ring_size; i++)
2478 printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
2479 le32_to_cpu(lp->rx_ring[i].base),
2480 (-le16_to_cpu(lp->rx_ring[i].buf_length)) &
2481 0xffff, le32_to_cpu(lp->rx_ring[i].msg_length),
2482 le16_to_cpu(lp->rx_ring[i].status));
2483 for (i = 0; i < lp->tx_ring_size; i++)
2484 printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
2485 le32_to_cpu(lp->tx_ring[i].base),
2486 (-le16_to_cpu(lp->tx_ring[i].length)) & 0xffff,
2487 le32_to_cpu(lp->tx_ring[i].misc),
2488 le16_to_cpu(lp->tx_ring[i].status));
2489 printk("\n");
2490 }
b368a3fb 2491 pcnet32_restart(dev, CSR0_NORMAL);
1da177e4 2492
860e9538 2493 netif_trans_update(dev); /* prevent tx timeout */
4a5e8e29 2494 netif_wake_queue(dev);
1da177e4 2495
4a5e8e29
JG
2496 spin_unlock_irqrestore(&lp->lock, flags);
2497}
2498
61357325
SH
2499static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
2500 struct net_device *dev)
1da177e4 2501{
1e56a4b4 2502 struct pcnet32_private *lp = netdev_priv(dev);
4a5e8e29
JG
2503 unsigned long ioaddr = dev->base_addr;
2504 u16 status;
2505 int entry;
2506 unsigned long flags;
1da177e4 2507
4a5e8e29 2508 spin_lock_irqsave(&lp->lock, flags);
1da177e4 2509
13ff83b9
JP
2510 netif_printk(lp, tx_queued, KERN_DEBUG, dev,
2511 "%s() called, csr0 %4.4x\n",
1d70cb06 2512 __func__, lp->a->read_csr(ioaddr, CSR0));
1da177e4 2513
4a5e8e29
JG
2514 /* Default status -- will not enable Successful-TxDone
2515 * interrupt when that option is available to us.
2516 */
2517 status = 0x8300;
1da177e4 2518
4a5e8e29 2519 /* Fill in a Tx ring entry */
1da177e4 2520
4a5e8e29
JG
2521 /* Mask to ring buffer boundary. */
2522 entry = lp->cur_tx & lp->tx_mod_mask;
1da177e4 2523
4a5e8e29
JG
2524 /* Caution: the write order is important here, set the status
2525 * with the "ownership" bits last. */
1da177e4 2526
3e33545b 2527 lp->tx_ring[entry].length = cpu_to_le16(-skb->len);
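	/* descriptor byte counts are stored as two's-complement negatives, hence -skb->len */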
1da177e4 2528
4a5e8e29 2529 lp->tx_ring[entry].misc = 0x00000000;
1da177e4 2530
4a5e8e29
JG
2531 lp->tx_dma_addr[entry] =
2532 pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
4cc5c475 2533 if (pci_dma_mapping_error(lp->pci_dev, lp->tx_dma_addr[entry])) {
af9ba92c 2534 dev_kfree_skb_any(skb);
4cc5c475
DF
2535 dev->stats.tx_dropped++;
2536 goto drop_packet;
2537 }
2538 lp->tx_skbuff[entry] = skb;
3e33545b 2539 lp->tx_ring[entry].base = cpu_to_le32(lp->tx_dma_addr[entry]);
4a5e8e29 2540 wmb(); /* Make sure owner changes after all others are visible */
3e33545b 2541 lp->tx_ring[entry].status = cpu_to_le16(status);
1da177e4 2542
4a5e8e29 2543 lp->cur_tx++;
4f1e5ba0 2544 dev->stats.tx_bytes += skb->len;
1da177e4 2545
4a5e8e29 2546 /* Trigger an immediate send poll. */
1d70cb06 2547 lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
1da177e4 2548
4a5e8e29
JG
2549 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
2550 lp->tx_full = 1;
2551 netif_stop_queue(dev);
2552 }
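	/*
	 * The check above: if the next descriptor still has a buffer
	 * attached, every Tx slot is in use, so stop the queue until
	 * completed transmits free an entry.
	 */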
4cc5c475 2553drop_packet:
4a5e8e29 2554 spin_unlock_irqrestore(&lp->lock, flags);
6ed10654 2555 return NETDEV_TX_OK;
1da177e4
LT
2556}
2557
2558/* The PCNET32 interrupt handler. */
2559static irqreturn_t
7d12e780 2560pcnet32_interrupt(int irq, void *dev_id)
1da177e4 2561{
4a5e8e29
JG
2562 struct net_device *dev = dev_id;
2563 struct pcnet32_private *lp;
2564 unsigned long ioaddr;
5c99346a 2565 u16 csr0;
4a5e8e29 2566 int boguscnt = max_interrupt_work;
4a5e8e29 2567
4a5e8e29 2568 ioaddr = dev->base_addr;
1e56a4b4 2569 lp = netdev_priv(dev);
1da177e4 2570
4a5e8e29
JG
2571 spin_lock(&lp->lock);
2572
1d70cb06 2573 csr0 = lp->a->read_csr(ioaddr, CSR0);
3904c324 2574 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
9e3f8063 2575 if (csr0 == 0xffff)
4a5e8e29 2576 break; /* PCMCIA remove happened */
4a5e8e29 2577 /* Acknowledge all of the current interrupt sources ASAP. */
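		/*
		 * Interrupt flags in CSR0 are cleared by writing them back as
		 * ones; masking with ~0x004f avoids also setting the control
		 * bits (init, start, stop, transmit demand, interrupt enable).
		 */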
1d70cb06 2578 lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
4a5e8e29 2579
13ff83b9
JP
2580 netif_printk(lp, intr, KERN_DEBUG, dev,
2581 "interrupt csr0=%#2.2x new csr=%#2.2x\n",
1d70cb06 2582 csr0, lp->a->read_csr(ioaddr, CSR0));
4a5e8e29 2583
4a5e8e29
JG
2584 /* Log misc errors. */
2585 if (csr0 & 0x4000)
4f1e5ba0 2586 dev->stats.tx_errors++; /* Tx babble. */
4a5e8e29
JG
2587 if (csr0 & 0x1000) {
2588 /*
3904c324
DF
2589 * This happens when our receive ring is full. This
2590 * shouldn't be a problem as we will see normal rx
2591 * interrupts for the frames in the receive ring. But
2592 * there are some PCI chipsets (I can reproduce this
2593 * on SP3G with the Intel Saturn chipset) which sometimes
2594 * have problems and will fill up the receive
2595 * ring with error descriptors. In this situation we
2596 * don't get a rx interrupt, but a missed frame
7de745e5 2597 * interrupt sooner or later.
4a5e8e29 2598 */
4f1e5ba0 2599 dev->stats.rx_errors++; /* Missed a Rx frame. */
4a5e8e29
JG
2600 }
2601 if (csr0 & 0x0800) {
13ff83b9
JP
2602 netif_err(lp, drv, dev, "Bus master arbitration failure, status %4.4x\n",
2603 csr0);
4a5e8e29 2604 /* unlike for the lance, there is no restart needed */
1da177e4 2605 }
288379f0 2606 if (napi_schedule_prep(&lp->napi)) {
7de745e5
DF
2607 u16 val;
2608 /* set interrupt masks */
1d70cb06 2609 val = lp->a->read_csr(ioaddr, CSR3);
7de745e5 2610 val |= 0x5f00;
1d70cb06 2611 lp->a->write_csr(ioaddr, CSR3, val);
ce105a08 2612
288379f0 2613 __napi_schedule(&lp->napi);
7de745e5
DF
2614 break;
2615 }
1d70cb06 2616 csr0 = lp->a->read_csr(ioaddr, CSR0);
4a5e8e29
JG
2617 }
2618
13ff83b9
JP
2619 netif_printk(lp, intr, KERN_DEBUG, dev,
2620 "exiting interrupt, csr0=%#4.4x\n",
1d70cb06 2621 lp->a->read_csr(ioaddr, CSR0));
4a5e8e29
JG
2622
2623 spin_unlock(&lp->lock);
2624
2625 return IRQ_HANDLED;
1da177e4
LT
2626}
2627
4a5e8e29 2628static int pcnet32_close(struct net_device *dev)
1da177e4 2629{
4a5e8e29 2630 unsigned long ioaddr = dev->base_addr;
1e56a4b4 2631 struct pcnet32_private *lp = netdev_priv(dev);
4a5e8e29 2632 unsigned long flags;
1da177e4 2633
4a5e8e29 2634 del_timer_sync(&lp->watchdog_timer);
1da177e4 2635
4a5e8e29 2636 netif_stop_queue(dev);
bea3348e 2637 napi_disable(&lp->napi);
1da177e4 2638
4a5e8e29 2639 spin_lock_irqsave(&lp->lock, flags);
1da177e4 2640
1d70cb06 2641 dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
1da177e4 2642
13ff83b9
JP
2643 netif_printk(lp, ifdown, KERN_DEBUG, dev,
2644 "Shutting down ethercard, status was %2.2x\n",
1d70cb06 2645 lp->a->read_csr(ioaddr, CSR0));
1da177e4 2646
4a5e8e29 2647 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
1d70cb06 2648 lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
1da177e4 2649
4a5e8e29
JG
2650 /*
2651 * Switch back to 16bit mode to avoid problems with dumb
2652 * DOS packet driver after a warm reboot
2653 */
1d70cb06 2654 lp->a->write_bcr(ioaddr, 20, 4);
1da177e4 2655
4a5e8e29 2656 spin_unlock_irqrestore(&lp->lock, flags);
1da177e4 2657
4a5e8e29 2658 free_irq(dev->irq, dev);
1da177e4 2659
4a5e8e29 2660 spin_lock_irqsave(&lp->lock, flags);
1da177e4 2661
ac5bfe40
DF
2662 pcnet32_purge_rx_ring(dev);
2663 pcnet32_purge_tx_ring(dev);
1da177e4 2664
4a5e8e29 2665 spin_unlock_irqrestore(&lp->lock, flags);
1da177e4 2666
4a5e8e29 2667 return 0;
1da177e4
LT
2668}
2669
4a5e8e29 2670static struct net_device_stats *pcnet32_get_stats(struct net_device *dev)
1da177e4 2671{
1e56a4b4 2672 struct pcnet32_private *lp = netdev_priv(dev);
4a5e8e29 2673 unsigned long ioaddr = dev->base_addr;
4a5e8e29
JG
2674 unsigned long flags;
2675
2676 spin_lock_irqsave(&lp->lock, flags);
1d70cb06 2677 dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
4a5e8e29
JG
2678 spin_unlock_irqrestore(&lp->lock, flags);
2679
4f1e5ba0 2680 return &dev->stats;
1da177e4
LT
2681}
2682
2683/* taken from the sunlance driver, which it took from the depca driver */
4a5e8e29 2684static void pcnet32_load_multicast(struct net_device *dev)
1da177e4 2685{
1e56a4b4 2686 struct pcnet32_private *lp = netdev_priv(dev);
6ecb7667 2687 volatile struct pcnet32_init_block *ib = lp->init_block;
3e33545b 2688 volatile __le16 *mcast_table = (__le16 *)ib->filter;
22bedad3 2689 struct netdev_hw_addr *ha;
df27f4a6 2690 unsigned long ioaddr = dev->base_addr;
4a5e8e29
JG
2691 int i;
2692 u32 crc;
2693
2694 /* set all multicast bits */
2695 if (dev->flags & IFF_ALLMULTI) {
3e33545b
AV
2696 ib->filter[0] = cpu_to_le32(~0U);
2697 ib->filter[1] = cpu_to_le32(~0U);
1d70cb06 2698 lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
2699 lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
2700 lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
2701 lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
4a5e8e29
JG
2702 return;
2703 }
2704 /* clear the multicast filter */
2705 ib->filter[0] = 0;
2706 ib->filter[1] = 0;
2707
2708 /* Add addresses */
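	/*
	 * The top 6 bits of the little-endian CRC select one of the 64
	 * logical-address-filter bits: bits 5:4 pick the 16-bit word and
	 * bits 3:0 the bit within it.
	 */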
22bedad3 2709 netdev_for_each_mc_addr(ha, dev) {
498d8e23 2710 crc = ether_crc_le(6, ha->addr);
4a5e8e29 2711 crc = crc >> 26;
3e33545b 2712 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
4a5e8e29 2713 }
df27f4a6 2714 for (i = 0; i < 4; i++)
1d70cb06 2715 lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
df27f4a6 2716 le16_to_cpu(mcast_table[i]));
1da177e4
LT
2717}
2718
1da177e4
LT
2719/*
2720 * Set or clear the multicast filter for this adaptor.
2721 */
2722static void pcnet32_set_multicast_list(struct net_device *dev)
2723{
4a5e8e29 2724 unsigned long ioaddr = dev->base_addr, flags;
1e56a4b4 2725 struct pcnet32_private *lp = netdev_priv(dev);
df27f4a6 2726 int csr15, suspended;
4a5e8e29
JG
2727
2728 spin_lock_irqsave(&lp->lock, flags);
df27f4a6 2729 suspended = pcnet32_suspend(dev, &flags, 0);
1d70cb06 2730 csr15 = lp->a->read_csr(ioaddr, CSR15);
4a5e8e29
JG
2731 if (dev->flags & IFF_PROMISC) {
2732 /* Log any net taps. */
13ff83b9 2733 netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
6ecb7667 2734 lp->init_block->mode =
3e33545b 2735 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
4a5e8e29 2736 7);
1d70cb06 2737 lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
4a5e8e29 2738 } else {
6ecb7667 2739 lp->init_block->mode =
3e33545b 2740 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
1d70cb06 2741 lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
4a5e8e29
JG
2742 pcnet32_load_multicast(dev);
2743 }
2744
df27f4a6 2745 if (suspended) {
cce5fbad 2746 pcnet32_clr_suspend(lp, ioaddr);
b368a3fb 2747 } else {
1d70cb06 2748 lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
df27f4a6
DF
2749 pcnet32_restart(dev, CSR0_NORMAL);
2750 netif_wake_queue(dev);
2751 }
4a5e8e29
JG
2752
2753 spin_unlock_irqrestore(&lp->lock, flags);
1da177e4
LT
2754}
2755
2756/* This routine assumes that the lp->lock is held */
2757static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
2758{
1e56a4b4 2759 struct pcnet32_private *lp = netdev_priv(dev);
4a5e8e29
JG
2760 unsigned long ioaddr = dev->base_addr;
2761 u16 val_out;
1da177e4 2762
4a5e8e29
JG
2763 if (!lp->mii)
2764 return 0;
1da177e4 2765
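	/*
	 * BCR33 holds the MII management address (PHY in bits 9:5, register
	 * in bits 4:0); BCR34 then transfers the register data.
	 */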
1d70cb06 2766 lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
2767 val_out = lp->a->read_bcr(ioaddr, 34);
1da177e4 2768
4a5e8e29 2769 return val_out;
1da177e4
LT
2770}
2771
2772/* This routine assumes that the lp->lock is held */
2773static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
2774{
1e56a4b4 2775 struct pcnet32_private *lp = netdev_priv(dev);
4a5e8e29 2776 unsigned long ioaddr = dev->base_addr;
1da177e4 2777
4a5e8e29
JG
2778 if (!lp->mii)
2779 return;
1da177e4 2780
1d70cb06 2781 lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
2782 lp->a->write_bcr(ioaddr, 34, val);
1da177e4
LT
2783}
2784
2785static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2786{
1e56a4b4 2787 struct pcnet32_private *lp = netdev_priv(dev);
4a5e8e29
JG
2788 int rc;
2789 unsigned long flags;
1da177e4 2790
4a5e8e29
JG
2791 /* SIOC[GS]MIIxxx ioctls */
2792 if (lp->mii) {
2793 spin_lock_irqsave(&lp->lock, flags);
2794 rc = generic_mii_ioctl(&lp->mii_if, if_mii(rq), cmd, NULL);
2795 spin_unlock_irqrestore(&lp->lock, flags);
2796 } else {
2797 rc = -EOPNOTSUPP;
2798 }
1da177e4 2799
4a5e8e29 2800 return rc;
1da177e4
LT
2801}
2802
ac62ef04
DF
2803static int pcnet32_check_otherphy(struct net_device *dev)
2804{
1e56a4b4 2805 struct pcnet32_private *lp = netdev_priv(dev);
4a5e8e29
JG
2806 struct mii_if_info mii = lp->mii_if;
2807 u16 bmcr;
2808 int i;
ac62ef04 2809
4a5e8e29
JG
2810 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
2811 if (i == lp->mii_if.phy_id)
2812 continue; /* skip active phy */
2813 if (lp->phymask & (1 << i)) {
2814 mii.phy_id = i;
2815 if (mii_link_ok(&mii)) {
2816 /* found PHY with active link */
13ff83b9
JP
2817 netif_info(lp, link, dev, "Using PHY number %d\n",
2818 i);
4a5e8e29
JG
2819
2820 /* isolate inactive phy */
2821 bmcr =
2822 mdio_read(dev, lp->mii_if.phy_id, MII_BMCR);
2823 mdio_write(dev, lp->mii_if.phy_id, MII_BMCR,
2824 bmcr | BMCR_ISOLATE);
2825
2826 /* de-isolate new phy */
2827 bmcr = mdio_read(dev, i, MII_BMCR);
2828 mdio_write(dev, i, MII_BMCR,
2829 bmcr & ~BMCR_ISOLATE);
2830
2831 /* set new phy address */
2832 lp->mii_if.phy_id = i;
2833 return 1;
2834 }
2835 }
ac62ef04 2836 }
4a5e8e29 2837 return 0;
ac62ef04
DF
2838}
2839
2840/*
2841 * Show the status of the media. Similar to mii_check_media however it
2842 * correctly shows the link speed for all (tested) pcnet32 variants.
2843 * Devices with no mii just report link state without speed.
2844 *
2845 * Caller is assumed to hold and release the lp->lock.
2846 */
2847
2848static void pcnet32_check_media(struct net_device *dev, int verbose)
2849{
1e56a4b4 2850 struct pcnet32_private *lp = netdev_priv(dev);
4a5e8e29
JG
2851 int curr_link;
2852 int prev_link = netif_carrier_ok(dev) ? 1 : 0;
2853 u32 bcr9;
2854
ac62ef04 2855 if (lp->mii) {
4a5e8e29 2856 curr_link = mii_link_ok(&lp->mii_if);
2be4cb97
OZ
2857 } else if (lp->chip_version == PCNET32_79C970A) {
2858 ulong ioaddr = dev->base_addr; /* card base I/O address */
2859 /* only read link if port is set to TP */
2860 if (!lp->autoneg && lp->port_tp)
2861 curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
2862 else /* link always up for AUI port or port auto select */
2863 curr_link = 1;
ac62ef04 2864 } else {
4a5e8e29 2865 ulong ioaddr = dev->base_addr; /* card base I/O address */
1d70cb06 2866 curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
4a5e8e29
JG
2867 }
2868 if (!curr_link) {
2869 if (prev_link || verbose) {
2870 netif_carrier_off(dev);
13ff83b9 2871 netif_info(lp, link, dev, "link down\n");
4a5e8e29
JG
2872 }
2873 if (lp->phycount > 1) {
2874 curr_link = pcnet32_check_otherphy(dev);
2875 prev_link = 0;
2876 }
2877 } else if (verbose || !prev_link) {
2878 netif_carrier_on(dev);
2879 if (lp->mii) {
2880 if (netif_msg_link(lp)) {
8ae6daca
DD
2881 struct ethtool_cmd ecmd = {
2882 .cmd = ETHTOOL_GSET };
4a5e8e29 2883 mii_ethtool_gset(&lp->mii_if, &ecmd);
8ae6daca
DD
2884 netdev_info(dev, "link up, %uMbps, %s-duplex\n",
2885 ethtool_cmd_speed(&ecmd),
13ff83b9
JP
2886 (ecmd.duplex == DUPLEX_FULL)
2887 ? "full" : "half");
4a5e8e29 2888 }
1d70cb06 2889 bcr9 = lp->a->read_bcr(dev->base_addr, 9);
4a5e8e29
JG
2890 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
2891 if (lp->mii_if.full_duplex)
2892 bcr9 |= (1 << 0);
2893 else
2894 bcr9 &= ~(1 << 0);
1d70cb06 2895 lp->a->write_bcr(dev->base_addr, 9, bcr9);
4a5e8e29
JG
2896 }
2897 } else {
13ff83b9 2898 netif_info(lp, link, dev, "link up\n");
4a5e8e29 2899 }
ac62ef04 2900 }
ac62ef04
DF
2901}
2902
2903/*
2904 * Check for loss of link and link establishment.
5bdc7380 2905 * Could possibly be changed to use mii_check_media instead.
ac62ef04
DF
2906 */
2907
1da177e4
LT
2908static void pcnet32_watchdog(struct net_device *dev)
2909{
1e56a4b4 2910 struct pcnet32_private *lp = netdev_priv(dev);
4a5e8e29 2911 unsigned long flags;
1da177e4 2912
4a5e8e29
JG
2913 /* Print the link status if it has changed */
2914 spin_lock_irqsave(&lp->lock, flags);
2915 pcnet32_check_media(dev, 0);
2916 spin_unlock_irqrestore(&lp->lock, flags);
1da177e4 2917
283a21d3 2918 mod_timer(&lp->watchdog_timer, round_jiffies(PCNET32_WATCHDOG_TIMEOUT));
1da177e4
LT
2919}
2920
917270c6
DF
2921static int pcnet32_pm_suspend(struct pci_dev *pdev, pm_message_t state)
2922{
2923 struct net_device *dev = pci_get_drvdata(pdev);
2924
2925 if (netif_running(dev)) {
2926 netif_device_detach(dev);
2927 pcnet32_close(dev);
2928 }
2929 pci_save_state(pdev);
2930 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2931 return 0;
2932}
2933
2934static int pcnet32_pm_resume(struct pci_dev *pdev)
2935{
2936 struct net_device *dev = pci_get_drvdata(pdev);
2937
2938 pci_set_power_state(pdev, PCI_D0);
2939 pci_restore_state(pdev);
2940
2941 if (netif_running(dev)) {
2942 pcnet32_open(dev);
2943 netif_device_attach(dev);
2944 }
2945 return 0;
2946}
2947
a9590879 2948static void pcnet32_remove_one(struct pci_dev *pdev)
1da177e4 2949{
4a5e8e29
JG
2950 struct net_device *dev = pci_get_drvdata(pdev);
2951
2952 if (dev) {
1e56a4b4 2953 struct pcnet32_private *lp = netdev_priv(dev);
4a5e8e29
JG
2954
2955 unregister_netdev(dev);
2956 pcnet32_free_ring(dev);
2957 release_region(dev->base_addr, PCNET32_TOTAL_SIZE);
7d2e3cb7 2958 pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
6ecb7667 2959 lp->init_block, lp->init_dma_addr);
4a5e8e29
JG
2960 free_netdev(dev);
2961 pci_disable_device(pdev);
4a5e8e29 2962 }
1da177e4
LT
2963}
2964
2965static struct pci_driver pcnet32_driver = {
4a5e8e29
JG
2966 .name = DRV_NAME,
2967 .probe = pcnet32_probe_pci,
a9590879 2968 .remove = pcnet32_remove_one,
4a5e8e29 2969 .id_table = pcnet32_pci_tbl,
917270c6
DF
2970 .suspend = pcnet32_pm_suspend,
2971 .resume = pcnet32_pm_resume,
1da177e4
LT
2972};
2973
2974/* An additional parameter that may be passed in... */
2975static int debug = -1;
2976static int tx_start_pt = -1;
2977static int pcnet32_have_pci;
2978
2979module_param(debug, int, 0);
2980MODULE_PARM_DESC(debug, DRV_NAME " debug level");
2981module_param(max_interrupt_work, int, 0);
4a5e8e29
JG
2982MODULE_PARM_DESC(max_interrupt_work,
2983 DRV_NAME " maximum events handled per interrupt");
1da177e4 2984module_param(rx_copybreak, int, 0);
4a5e8e29
JG
2985MODULE_PARM_DESC(rx_copybreak,
2986 DRV_NAME " copy breakpoint for copy-only-tiny-frames");
1da177e4
LT
2987module_param(tx_start_pt, int, 0);
2988MODULE_PARM_DESC(tx_start_pt, DRV_NAME " transmit start point (0-3)");
2989module_param(pcnet32vlb, int, 0);
2990MODULE_PARM_DESC(pcnet32vlb, DRV_NAME " Vesa local bus (VLB) support (0/1)");
2991module_param_array(options, int, NULL, 0);
2992MODULE_PARM_DESC(options, DRV_NAME " initial option setting(s) (0-15)");
2993module_param_array(full_duplex, int, NULL, 0);
2994MODULE_PARM_DESC(full_duplex, DRV_NAME " full duplex setting(s) (1)");
2995/* Module Parameter for HomePNA cards added by Patrick Simmons, 2004 */
2996module_param_array(homepna, int, NULL, 0);
4a5e8e29
JG
2997MODULE_PARM_DESC(homepna,
2998 DRV_NAME
2999 " mode for 79C978 cards (1 for HomePNA, 0 for Ethernet, default Ethernet)");
1da177e4
LT
3000
3001MODULE_AUTHOR("Thomas Bogendoerfer");
3002MODULE_DESCRIPTION("Driver for PCnet32 and PCnetPCI based ethercards");
3003MODULE_LICENSE("GPL");
3004
3005#define PCNET32_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
3006
3007static int __init pcnet32_init_module(void)
3008{
13ff83b9 3009 pr_info("%s", version);
1da177e4 3010
4a5e8e29 3011 pcnet32_debug = netif_msg_init(debug, PCNET32_MSG_DEFAULT);
1da177e4 3012
4a5e8e29
JG
3013 if ((tx_start_pt >= 0) && (tx_start_pt <= 3))
3014 tx_start = tx_start_pt;
1da177e4 3015
4a5e8e29 3016 /* find the PCI devices */
29917620 3017 if (!pci_register_driver(&pcnet32_driver))
4a5e8e29 3018 pcnet32_have_pci = 1;
1da177e4 3019
4a5e8e29
JG
3020 /* should we find any remaining VLbus devices ? */
3021 if (pcnet32vlb)
dcaf9769 3022 pcnet32_probe_vlbus(pcnet32_portlist);
1da177e4 3023
4a5e8e29 3024 if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE))
13ff83b9 3025 pr_info("%d cards_found\n", cards_found);
1da177e4 3026
4a5e8e29 3027 return (pcnet32_have_pci + cards_found) ? 0 : -ENODEV;
1da177e4
LT
3028}
3029
3030static void __exit pcnet32_cleanup_module(void)
3031{
4a5e8e29
JG
3032 struct net_device *next_dev;
3033
3034 while (pcnet32_dev) {
1e56a4b4 3035 struct pcnet32_private *lp = netdev_priv(pcnet32_dev);
4a5e8e29
JG
3036 next_dev = lp->next;
3037 unregister_netdev(pcnet32_dev);
3038 pcnet32_free_ring(pcnet32_dev);
3039 release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE);
7d2e3cb7 3040 pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
6ecb7667 3041 lp->init_block, lp->init_dma_addr);
4a5e8e29
JG
3042 free_netdev(pcnet32_dev);
3043 pcnet32_dev = next_dev;
3044 }
1da177e4 3045
4a5e8e29
JG
3046 if (pcnet32_have_pci)
3047 pci_unregister_driver(&pcnet32_driver);
1da177e4
LT
3048}
3049
3050module_init(pcnet32_init_module);
3051module_exit(pcnet32_cleanup_module);
3052
3053/*
3054 * Local variables:
3055 * c-indent-level: 4
3056 * tab-width: 8
3057 * End:
3058 */