1/* epic100.c: A SMC 83c170 EPIC/100 Fast Ethernet driver for Linux. */
2/*
3 Written/copyright 1997-2001 by Donald Becker.
4
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
11
12 This driver is for the SMC83c170/175 "EPIC" series, as used on the
13 SMC EtherPower II 9432 PCI adapter, and several CardBus cards.
14
15 The author may be reached as becker@scyld.com, or C/O
16 Scyld Computing Corporation
17 410 Severn Ave., Suite 210
18 Annapolis MD 21403
19
20 Information and updates available at
21 http://www.scyld.com/network/epic100.html
36e1e847 22 [this link no longer provides anything useful -jgarzik]
23
24 ---------------------------------------------------------------------
f3b197ac 25
26*/
27
28#define DRV_NAME "epic100"
29#define DRV_VERSION "2.1"
30#define DRV_RELDATE "Sept 11, 2006"
31
32/* The user-configurable values.
33 These may be modified when a driver module is loaded.*/
34
35static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
36
37/* Used to pass the full-duplex flag, etc. */
38#define MAX_UNITS 8 /* More are supported, limit only on options */
39static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
40static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
41
42/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
43 Setting to > 1518 effectively disables this feature. */
44static int rx_copybreak;
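/* Example: rx_copybreak = 200 makes epic_rx() copy any frame shorter than
   200 bytes into a freshly allocated skb and leave the original full-size
   buffer in the ring; larger frames are passed up in the DMA buffer itself. */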
45
46/* Operational parameters that are set at compile time. */
47
48/* Keep the ring sizes a power of two for operational efficiency.
49 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
50 Making the Tx ring too large decreases the effectiveness of channel
51 bonding and packet priority.
52 There are no ill effects from too-large receive rings. */
53#define TX_RING_SIZE 256
54#define TX_QUEUE_LEN 240 /* Limit ring entries actually used. */
55#define RX_RING_SIZE 256
56#define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct epic_tx_desc)
57#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct epic_rx_desc)
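/* Example of the power-of-two payoff: with TX_RING_SIZE = 256, the ring index
   "cur_tx % TX_RING_SIZE" used in the fast path reduces to "cur_tx & 0xff",
   a single AND instead of a divide. */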
58
59/* Operational parameters that usually are not changed. */
60/* Time in jiffies before concluding the transmitter is hung. */
61#define TX_TIMEOUT (2*HZ)
62
63#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
64
65/* Bytes transferred to chip before transmission starts. */
66/* Initial threshold, increased on underflow, rounded down to 4 byte units. */
67#define TX_FIFO_THRESH 256
68#define RX_FIFO_THRESH 1 /* 0-3: 0==32, 1==64, 2==96, 3==128 bytes */
69
70#include <linux/module.h>
71#include <linux/kernel.h>
72#include <linux/string.h>
73#include <linux/timer.h>
74#include <linux/errno.h>
75#include <linux/ioport.h>
76#include <linux/interrupt.h>
77#include <linux/pci.h>
78#include <linux/delay.h>
79#include <linux/netdevice.h>
80#include <linux/etherdevice.h>
81#include <linux/skbuff.h>
82#include <linux/init.h>
83#include <linux/spinlock.h>
84#include <linux/ethtool.h>
85#include <linux/mii.h>
86#include <linux/crc32.h>
87#include <linux/bitops.h>
88#include <asm/io.h>
89#include <asm/uaccess.h>
9e2d11b9 90#include <asm/byteorder.h>
91
92/* These identify the driver base version and may not be removed. */
93static char version[] __devinitdata =
94DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>\n";
95static char version2[] __devinitdata =
96" (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
97
98MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
99MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
100MODULE_LICENSE("GPL");
101
102module_param(debug, int, 0);
103module_param(rx_copybreak, int, 0);
104module_param_array(options, int, NULL, 0);
105module_param_array(full_duplex, int, NULL, 0);
106MODULE_PARM_DESC(debug, "EPIC/100 debug level (0-5)");
107MODULE_PARM_DESC(options, "EPIC/100: Bits 0-3: media type, bit 4: full duplex");
108MODULE_PARM_DESC(rx_copybreak, "EPIC/100 copy breakpoint for copy-only-tiny-frames");
109MODULE_PARM_DESC(full_duplex, "EPIC/100 full duplex setting(s) (1)");
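/* Example module load: "modprobe epic100 debug=3 full_duplex=1,1" raises the
   log level and forces full duplex on the first two adapters; the array
   parameters take one comma-separated value per card, up to MAX_UNITS. */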
110
111/*
112 Theory of Operation
113
114I. Board Compatibility
115
116This device driver is designed for the SMC "EPIC/100", the SMC
117single-chip Ethernet controllers for PCI. This chip is used on
118the SMC EtherPower II boards.
119
120II. Board-specific settings
121
122PCI bus devices are configured by the system at boot time, so no jumpers
123need to be set on the board. The system BIOS will assign the
124PCI INTA signal to a (preferably otherwise unused) system IRQ line.
125Note: Kernel versions earlier than 1.3.73 do not support shared PCI
126interrupt lines.
127
128III. Driver operation
129
130IIIa. Ring buffers
131
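The Rx and Tx rings are arrays of RX_RING_SIZE and TX_RING_SIZE descriptors
(struct epic_rx_desc / struct epic_tx_desc below). Each descriptor's 'next'
field holds the bus address of the following descriptor and the last entry
points back to the first, so the chip walks a circular list. The DescOwn bit
in the status word records whether the driver or the chip currently owns a
descriptor and its buffer.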
132IVb. References
133
134http://www.smsc.com/media/Downloads_Public/discontinued/83c171.pdf
135http://www.smsc.com/media/Downloads_Public/discontinued/83c175.pdf
136http://scyld.com/expert/NWay.html
137http://www.national.com/pf/DP/DP83840A.html
138
139IVc. Errata
140
141*/
142
143
144enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 };
145
146#define EPIC_TOTAL_SIZE 0x100
147#define USE_IO_OPS 1
1da177e4 148
149#ifdef USE_IO_OPS
150#define EPIC_BAR 0
151#else
152#define EPIC_BAR 1
153#endif
154
155typedef enum {
156 SMSC_83C170_0,
157 SMSC_83C170,
158 SMSC_83C175,
159} chip_t;
160
161
162struct epic_chip_info {
163 const char *name;
164 int drv_flags; /* Driver use, intended as capability flags. */
165};
166
167
168/* indexed by chip_t */
f71e1309 169static const struct epic_chip_info pci_id_tbl[] = {
170 { "SMSC EPIC/100 83c170", TYPE2_INTR | NO_MII | MII_PWRDWN },
171 { "SMSC EPIC/100 83c170", TYPE2_INTR },
172 { "SMSC EPIC/C 83c175", TYPE2_INTR | MII_PWRDWN },
173};
174
175
a3aa1884 176static DEFINE_PCI_DEVICE_TABLE(epic_pci_tbl) = {
177 { 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 },
178 { 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 },
179 { 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID,
180 PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, SMSC_83C175 },
181 { 0,}
182};
183MODULE_DEVICE_TABLE (pci, epic_pci_tbl);
184
185#define ew16(reg, val) iowrite16(val, ioaddr + (reg))
186#define ew32(reg, val) iowrite32(val, ioaddr + (reg))
187#define er8(reg) ioread8(ioaddr + (reg))
188#define er16(reg) ioread16(ioaddr + (reg))
189#define er32(reg) ioread32(ioaddr + (reg))
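/* These accessors expect a local "void __iomem *ioaddr" in scope. Since
   ioread/iowrite work on both the port-I/O and MMIO cookies returned by
   pci_iomap(), USE_IO_OPS only has to change which BAR gets mapped. */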
190
191/* Offsets to registers, using the (ugh) SMC names. */
192enum epic_registers {
193 COMMAND=0, INTSTAT=4, INTMASK=8, GENCTL=0x0C, NVCTL=0x10, EECTL=0x14,
194 PCIBurstCnt=0x18,
195 TEST1=0x1C, CRCCNT=0x20, ALICNT=0x24, MPCNT=0x28, /* Rx error counters. */
196 MIICtrl=0x30, MIIData=0x34, MIICfg=0x38,
197 LAN0=64, /* MAC address. */
198 MC0=80, /* Multicast filter table. */
199 RxCtrl=96, TxCtrl=112, TxSTAT=0x74,
200 PRxCDAR=0x84, RxSTAT=0xA4, EarlyRx=0xB0, PTxCDAR=0xC4, TxThresh=0xDC,
201};
202
203/* Interrupt register bits, using my own meaningful names. */
204enum IntrStatus {
205 TxIdle=0x40000, RxIdle=0x20000, IntrSummary=0x010000,
206 PCIBusErr170=0x7000, PCIBusErr175=0x1000, PhyEvent175=0x8000,
207 RxStarted=0x0800, RxEarlyWarn=0x0400, CntFull=0x0200, TxUnderrun=0x0100,
208 TxEmpty=0x0080, TxDone=0x0020, RxError=0x0010,
209 RxOverflow=0x0008, RxFull=0x0004, RxHeader=0x0002, RxDone=0x0001,
210};
211enum CommandBits {
212 StopRx=1, StartRx=2, TxQueued=4, RxQueued=8,
213 StopTxDMA=0x20, StopRxDMA=0x40, RestartTx=0x80,
214};
215
216#define EpicRemoved 0xffffffff /* Chip failed or removed (CardBus) */
217
218#define EpicNapiEvent (TxEmpty | TxDone | \
219 RxDone | RxStarted | RxEarlyWarn | RxOverflow | RxFull)
220#define EpicNormalEvent (0x0000ffff & ~EpicNapiEvent)
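/* Sources in EpicNapiEvent (Rx work and Tx completions) are processed in
   epic_poll(): epic_interrupt() masks them and schedules NAPI, and they are
   acknowledged only after the poll loop has drained the rings. The remaining
   EpicNormalEvent sources are acknowledged and handled directly in the hard
   interrupt handler. */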
221
f71e1309 222static const u16 media2miictl[16] = {
223 0, 0x0C00, 0x0C00, 0x2000, 0x0100, 0x2100, 0, 0,
224 0, 0, 0, 0, 0, 0, 0, 0 };
225
226/*
227 * The EPIC100 Rx and Tx buffer descriptors. Note that these
228 * really ARE host-endian; it's not a misannotation. We tell
229 * the card to byteswap them internally on big-endian hosts -
9e2d11b9 230 * look for #ifdef __BIG_ENDIAN in epic_open().
9ebfd492 231 */
232
233struct epic_tx_desc {
234 u32 txstatus;
235 u32 bufaddr;
236 u32 buflength;
237 u32 next;
238};
239
240struct epic_rx_desc {
241 u32 rxstatus;
242 u32 bufaddr;
243 u32 buflength;
244 u32 next;
245};
246
247enum desc_status_bits {
248 DescOwn=0x8000,
249};
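/* Ownership handshake: the driver fills in bufaddr/buflength and then sets
   DescOwn to hand a descriptor to the chip; the chip clears DescOwn when it
   writes back the completion status. epic_tx() and epic_rx() poll for a
   clear DescOwn bit before touching a descriptor again. */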
250
251#define PRIV_ALIGN 15 /* Required alignment mask */
252struct epic_private {
253 struct epic_rx_desc *rx_ring;
254 struct epic_tx_desc *tx_ring;
255 /* The saved address of a sent-in-place packet/buffer, for skfree(). */
256 struct sk_buff* tx_skbuff[TX_RING_SIZE];
257 /* The addresses of receive-in-place skbuffs. */
258 struct sk_buff* rx_skbuff[RX_RING_SIZE];
259
260 dma_addr_t tx_ring_dma;
261 dma_addr_t rx_ring_dma;
262
263 /* Ring pointers. */
264 spinlock_t lock; /* Group with Tx control cache line. */
265 spinlock_t napi_lock;
bea3348e 266 struct napi_struct napi;
267 unsigned int reschedule_in_poll;
268 unsigned int cur_tx, dirty_tx;
269
270 unsigned int cur_rx, dirty_rx;
271 u32 irq_mask;
272 unsigned int rx_buf_sz; /* Based on MTU+slack. */
273
aae9bc30 274 void __iomem *ioaddr;
275 struct pci_dev *pci_dev; /* PCI bus location. */
276 int chip_id, chip_flags;
277
278 struct timer_list timer; /* Media selection timer. */
279 int tx_threshold;
280 unsigned char mc_filter[8];
281 signed char phys[4]; /* MII device addresses. */
282 u16 advertising; /* NWay media advertisement */
283 int mii_phy_cnt;
284 struct mii_if_info mii;
285 unsigned int tx_full:1; /* The Tx queue is full. */
286 unsigned int default_port:4; /* Last dev->if_port value. */
287};
288
289static int epic_open(struct net_device *dev);
aae9bc30 290static int read_eeprom(struct epic_private *, int);
291static int mdio_read(struct net_device *dev, int phy_id, int location);
292static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
293static void epic_restart(struct net_device *dev);
294static void epic_timer(unsigned long data);
295static void epic_tx_timeout(struct net_device *dev);
296static void epic_init_ring(struct net_device *dev);
297static netdev_tx_t epic_start_xmit(struct sk_buff *skb,
298 struct net_device *dev);
1da177e4 299static int epic_rx(struct net_device *dev, int budget);
bea3348e 300static int epic_poll(struct napi_struct *napi, int budget);
7d12e780 301static irqreturn_t epic_interrupt(int irq, void *dev_instance);
1da177e4 302static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
7282d491 303static const struct ethtool_ops netdev_ethtool_ops;
304static int epic_close(struct net_device *dev);
305static struct net_device_stats *epic_get_stats(struct net_device *dev);
306static void set_rx_mode(struct net_device *dev);
307
308static const struct net_device_ops epic_netdev_ops = {
309 .ndo_open = epic_open,
310 .ndo_stop = epic_close,
311 .ndo_start_xmit = epic_start_xmit,
312 .ndo_tx_timeout = epic_tx_timeout,
313 .ndo_get_stats = epic_get_stats,
afc4b13d 314 .ndo_set_rx_mode = set_rx_mode,
315 .ndo_do_ioctl = netdev_ioctl,
316 .ndo_change_mtu = eth_change_mtu,
317 .ndo_set_mac_address = eth_mac_addr,
318 .ndo_validate_addr = eth_validate_addr,
319};
1da177e4 320
321static int __devinit epic_init_one(struct pci_dev *pdev,
322 const struct pci_device_id *ent)
323{
324 static int card_idx = -1;
aae9bc30 325 void __iomem *ioaddr;
326 int chip_idx = (int) ent->driver_data;
327 int irq;
328 struct net_device *dev;
329 struct epic_private *ep;
330 int i, ret, option = 0, duplex = 0;
331 void *ring_space;
332 dma_addr_t ring_dma;
333
334/* when built into the kernel, we only print version if device is found */
335#ifndef MODULE
336 static int printed_version;
337 if (!printed_version++)
ad361c98 338 printk(KERN_INFO "%s%s", version, version2);
1da177e4 339#endif
f3b197ac 340
1da177e4 341 card_idx++;
f3b197ac 342
343 ret = pci_enable_device(pdev);
344 if (ret)
345 goto out;
346 irq = pdev->irq;
347
36e1e847 348 if (pci_resource_len(pdev, 0) < EPIC_TOTAL_SIZE) {
9b91cf9d 349 dev_err(&pdev->dev, "no PCI region space\n");
350 ret = -ENODEV;
351 goto err_out_disable;
352 }
f3b197ac 353
354 pci_set_master(pdev);
355
356 ret = pci_request_regions(pdev, DRV_NAME);
357 if (ret < 0)
358 goto err_out_disable;
359
360 ret = -ENOMEM;
361
362 dev = alloc_etherdev(sizeof (*ep));
41de8d4c 363 if (!dev)
1da177e4 364 goto err_out_free_res;
41de8d4c 365
366 SET_NETDEV_DEV(dev, &pdev->dev);
367
aae9bc30 368 ioaddr = pci_iomap(pdev, EPIC_BAR, 0);
1da177e4 369 if (!ioaddr) {
9b91cf9d 370 dev_err(&pdev->dev, "ioremap failed\n");
371 goto err_out_free_netdev;
372 }
373
374 pci_set_drvdata(pdev, dev);
4cf1653a 375 ep = netdev_priv(dev);
aae9bc30 376 ep->ioaddr = ioaddr;
377 ep->mii.dev = dev;
378 ep->mii.mdio_read = mdio_read;
379 ep->mii.mdio_write = mdio_write;
380 ep->mii.phy_id_mask = 0x1f;
381 ep->mii.reg_num_mask = 0x1f;
382
383 ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
384 if (!ring_space)
385 goto err_out_iounmap;
43d620c8 386 ep->tx_ring = ring_space;
387 ep->tx_ring_dma = ring_dma;
388
389 ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
390 if (!ring_space)
391 goto err_out_unmap_tx;
43d620c8 392 ep->rx_ring = ring_space;
393 ep->rx_ring_dma = ring_dma;
394
395 if (dev->mem_start) {
396 option = dev->mem_start;
397 duplex = (dev->mem_start & 16) ? 1 : 0;
398 } else if (card_idx >= 0 && card_idx < MAX_UNITS) {
399 if (options[card_idx] >= 0)
400 option = options[card_idx];
401 if (full_duplex[card_idx] >= 0)
402 duplex = full_duplex[card_idx];
403 }
404
405 spin_lock_init(&ep->lock);
406 spin_lock_init(&ep->napi_lock);
407 ep->reschedule_in_poll = 0;
408
409 /* Bring the chip out of low-power mode. */
aae9bc30 410 ew32(GENCTL, 0x4200);
411 /* Magic?! If we don't set this bit the MII interface won't work. */
412 /* This magic is documented in SMSC app note 7.15 */
413 for (i = 16; i > 0; i--)
aae9bc30 414 ew32(TEST1, 0x0008);
415
416 /* Turn on the MII transceiver. */
aae9bc30 417 ew32(MIICfg, 0x12);
1da177e4 418 if (chip_idx == 1)
419 ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
420 ew32(GENCTL, 0x0200);
421
422 /* Note: the '175 does not have a serial EEPROM. */
423 for (i = 0; i < 3; i++)
aae9bc30 424 ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(er16(LAN0 + i*4));
425
426 if (debug > 2) {
2e8a538d 427 dev_printk(KERN_DEBUG, &pdev->dev, "EEPROM contents:\n");
1da177e4 428 for (i = 0; i < 64; i++)
aae9bc30 429 printk(" %4.4x%s", read_eeprom(ep, i),
430 i % 16 == 15 ? "\n" : "");
431 }
432
433 ep->pci_dev = pdev;
434 ep->chip_id = chip_idx;
435 ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
f3b197ac 436 ep->irq_mask =
437 (ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
438 | CntFull | TxUnderrun | EpicNapiEvent;
439
440 /* Find the connected MII xcvrs.
441 Doing this in open() would allow detecting external xcvrs later, but
442 takes much time and no cards have external MII. */
443 {
444 int phy, phy_idx = 0;
445 for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) {
446 int mii_status = mdio_read(dev, phy, MII_BMSR);
447 if (mii_status != 0xffff && mii_status != 0x0000) {
448 ep->phys[phy_idx++] = phy;
9b91cf9d 449 dev_info(&pdev->dev,
450 "MII transceiver #%d control "
451 "%4.4x status %4.4x.\n",
452 phy, mdio_read(dev, phy, 0), mii_status);
453 }
454 }
455 ep->mii_phy_cnt = phy_idx;
456 if (phy_idx != 0) {
457 phy = ep->phys[0];
458 ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE);
9b91cf9d 459 dev_info(&pdev->dev,
2e8a538d 460 "Autonegotiation advertising %4.4x link "
1da177e4 461 "partner %4.4x.\n",
2e8a538d 462 ep->mii.advertising, mdio_read(dev, phy, 5));
1da177e4 463 } else if ( ! (ep->chip_flags & NO_MII)) {
9b91cf9d 464 dev_warn(&pdev->dev,
2e8a538d 465 "***WARNING***: No MII transceiver found!\n");
466 /* Use the known PHY address of the EPII. */
467 ep->phys[0] = 3;
468 }
469 ep->mii.phy_id = ep->phys[0];
470 }
471
472 /* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */
473 if (ep->chip_flags & MII_PWRDWN)
474 ew32(NVCTL, er32(NVCTL) & ~0x483c);
475 ew32(GENCTL, 0x0008);
476
477 /* The lower four bits are the media type. */
478 if (duplex) {
479 ep->mii.force_media = ep->mii.full_duplex = 1;
9b91cf9d 480 dev_info(&pdev->dev, "Forced full duplex requested.\n");
481 }
482 dev->if_port = ep->default_port = option;
483
484 /* The Epic-specific entries in the device structure. */
805524cb 485 dev->netdev_ops = &epic_netdev_ops;
486 dev->ethtool_ops = &netdev_ethtool_ops;
487 dev->watchdog_timeo = TX_TIMEOUT;
bea3348e 488 netif_napi_add(dev, &ep->napi, epic_poll, 64);
489
490 ret = register_netdev(dev);
491 if (ret < 0)
492 goto err_out_unmap_rx;
493
494 printk(KERN_INFO "%s: %s at %lx, IRQ %d, %pM\n",
495 dev->name, pci_id_tbl[chip_idx].name,
496 (long)pci_resource_start(pdev, EPIC_BAR), pdev->irq,
e174961c 497 dev->dev_addr);
498
499out:
500 return ret;
501
502err_out_unmap_rx:
503 pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
504err_out_unmap_tx:
505 pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
506err_out_iounmap:
aae9bc30 507 pci_iounmap(pdev, ioaddr);
1da177e4 508err_out_free_netdev:
509 free_netdev(dev);
510err_out_free_res:
511 pci_release_regions(pdev);
512err_out_disable:
513 pci_disable_device(pdev);
514 goto out;
515}
f3b197ac 516
517/* Serial EEPROM section. */
518
519/* EEPROM_Ctrl bits. */
520#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
521#define EE_CS 0x02 /* EEPROM chip select. */
522#define EE_DATA_WRITE 0x08 /* EEPROM chip data in. */
523#define EE_WRITE_0 0x01
524#define EE_WRITE_1 0x09
525#define EE_DATA_READ 0x10 /* EEPROM chip data out. */
526#define EE_ENB (0x0001 | EE_CS)
527
528/* Delay between EEPROM clock transitions.
529 This serves to flush the operation to the PCI bus.
530 */
531
aae9bc30 532#define eeprom_delay() er32(EECTL)
533
534/* The EEPROM commands include the always-set leading bit. */
535#define EE_WRITE_CMD (5 << 6)
536#define EE_READ64_CMD (6 << 6)
537#define EE_READ256_CMD (6 << 8)
538#define EE_ERASE_CMD (7 << 6)
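/* read_eeprom() below bit-bangs the serial EEPROM through EECTL: assert
   EE_CS, shift out the 13-bit read command (opcode plus address, MSB first)
   on EE_DATA_WRITE, then clock in 16 data bits, sampling EE_DATA_READ each
   cycle. Bit 0x40 of EECTL selects which command layout applies, i.e. a
   small (EE_READ64_CMD) or large (EE_READ256_CMD) part. */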
539
540static void epic_disable_int(struct net_device *dev, struct epic_private *ep)
541{
aae9bc30 542 void __iomem *ioaddr = ep->ioaddr;
1da177e4 543
aae9bc30 544 ew32(INTMASK, 0x00000000);
545}
546
aae9bc30 547static inline void __epic_pci_commit(void __iomem *ioaddr)
548{
549#ifndef USE_IO_OPS
aae9bc30 550 er32(INTMASK);
551#endif
552}
553
554static inline void epic_napi_irq_off(struct net_device *dev,
555 struct epic_private *ep)
556{
aae9bc30 557 void __iomem *ioaddr = ep->ioaddr;
1da177e4 558
aae9bc30 559 ew32(INTMASK, ep->irq_mask & ~EpicNapiEvent);
560 __epic_pci_commit(ioaddr);
561}
562
563static inline void epic_napi_irq_on(struct net_device *dev,
564 struct epic_private *ep)
565{
aae9bc30 566 void __iomem *ioaddr = ep->ioaddr;
567
568 /* No need to commit possible posted write */
aae9bc30 569 ew32(INTMASK, ep->irq_mask | EpicNapiEvent);
570}
571
aae9bc30 572static int __devinit read_eeprom(struct epic_private *ep, int location)
1da177e4 573{
aae9bc30 574 void __iomem *ioaddr = ep->ioaddr;
575 int i;
576 int retval = 0;
1da177e4 577 int read_cmd = location |
aae9bc30 578 (er32(EECTL) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);
1da177e4 579
580 ew32(EECTL, EE_ENB & ~EE_CS);
581 ew32(EECTL, EE_ENB);
582
583 /* Shift the read command bits out. */
584 for (i = 12; i >= 0; i--) {
585 short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0;
aae9bc30 586 ew32(EECTL, EE_ENB | dataval);
1da177e4 587 eeprom_delay();
aae9bc30 588 ew32(EECTL, EE_ENB | dataval | EE_SHIFT_CLK);
589 eeprom_delay();
590 }
aae9bc30 591 ew32(EECTL, EE_ENB);
592
593 for (i = 16; i > 0; i--) {
aae9bc30 594 ew32(EECTL, EE_ENB | EE_SHIFT_CLK);
1da177e4 595 eeprom_delay();
596 retval = (retval << 1) | ((er32(EECTL) & EE_DATA_READ) ? 1 : 0);
597 ew32(EECTL, EE_ENB);
598 eeprom_delay();
599 }
600
601 /* Terminate the EEPROM access. */
aae9bc30 602 ew32(EECTL, EE_ENB & ~EE_CS);
603 return retval;
604}
605
606#define MII_READOP 1
607#define MII_WRITEOP 2
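/* MIICtrl command word as used below: PHY address in bits 9-13, register
   number in bits 4-8, and the opcode (MII_READOP or MII_WRITEOP) in the low
   bits. The chip clears the opcode bit when the transaction finishes, which
   is what the polling loops in mdio_read()/mdio_write() wait for. */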
608static int mdio_read(struct net_device *dev, int phy_id, int location)
609{
610 struct epic_private *ep = netdev_priv(dev);
611 void __iomem *ioaddr = ep->ioaddr;
612 int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP;
613 int i;
614
aae9bc30 615 ew32(MIICtrl, read_cmd);
616 /* Typical operation takes 25 loops. */
617 for (i = 400; i > 0; i--) {
618 barrier();
aae9bc30 619 if ((er32(MIICtrl) & MII_READOP) == 0) {
1da177e4 620 /* Work around read failure bug. */
8e95a202 621 if (phy_id == 1 && location < 6 &&
622 er16(MIIData) == 0xffff) {
623 ew32(MIICtrl, read_cmd);
624 continue;
625 }
aae9bc30 626 return er16(MIIData);
627 }
628 }
629 return 0xffff;
630}
631
632static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
633{
634 struct epic_private *ep = netdev_priv(dev);
635 void __iomem *ioaddr = ep->ioaddr;
636 int i;
637
638 ew16(MIIData, value);
639 ew32(MIICtrl, (phy_id << 9) | (loc << 4) | MII_WRITEOP);
f3b197ac 640 for (i = 10000; i > 0; i--) {
1da177e4 641 barrier();
aae9bc30 642 if ((er32(MIICtrl) & MII_WRITEOP) == 0)
643 break;
644 }
645}
646
f3b197ac 647
648static int epic_open(struct net_device *dev)
649{
4cf1653a 650 struct epic_private *ep = netdev_priv(dev);
651 void __iomem *ioaddr = ep->ioaddr;
652 const int irq = ep->pci_dev->irq;
653 int rc, i;
654
655 /* Soft reset the chip. */
aae9bc30 656 ew32(GENCTL, 0x4001);
1da177e4 657
bea3348e 658 napi_enable(&ep->napi);
659 rc = request_irq(irq, epic_interrupt, IRQF_SHARED, dev->name, dev);
660 if (rc) {
bea3348e 661 napi_disable(&ep->napi);
aae9bc30 662 return rc;
bea3348e 663 }
664
665 epic_init_ring(dev);
666
aae9bc30 667 ew32(GENCTL, 0x4000);
668 /* This magic is documented in SMSC app note 7.15 */
669 for (i = 16; i > 0; i--)
aae9bc30 670 ew32(TEST1, 0x0008);
671
672 /* Pull the chip out of low-power mode, enable interrupts, and set for
673 PCI read multiple. The MIIcfg setting and strange write order are
674 required by the details of which bits are reset and the transceiver
675 wiring on the Ositech CardBus card.
676 */
677#if 0
aae9bc30 678 ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12);
679#endif
680 if (ep->chip_flags & MII_PWRDWN)
aae9bc30 681 ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
1da177e4 682
9ebfd492 683 /* Tell the chip to byteswap descriptors on big-endian hosts */
9e2d11b9 684#ifdef __BIG_ENDIAN
685 ew32(GENCTL, 0x4432 | (RX_FIFO_THRESH << 8));
686 er32(GENCTL);
687 ew32(GENCTL, 0x0432 | (RX_FIFO_THRESH << 8));
1da177e4 688#else
689 ew32(GENCTL, 0x4412 | (RX_FIFO_THRESH << 8));
690 er32(GENCTL);
691 ew32(GENCTL, 0x0412 | (RX_FIFO_THRESH << 8));
692#endif
693
694 udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */
f3b197ac 695
1da177e4 696 for (i = 0; i < 3; i++)
aae9bc30 697 ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i]));
698
699 ep->tx_threshold = TX_FIFO_THRESH;
aae9bc30 700 ew32(TxThresh, ep->tx_threshold);
701
702 if (media2miictl[dev->if_port & 15]) {
703 if (ep->mii_phy_cnt)
704 mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]);
705 if (dev->if_port == 1) {
706 if (debug > 1)
707 printk(KERN_INFO "%s: Using the 10base2 transceiver, MII "
708 "status %4.4x.\n",
709 dev->name, mdio_read(dev, ep->phys[0], MII_BMSR));
710 }
711 } else {
712 int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA);
713 if (mii_lpa != 0xffff) {
714 if ((mii_lpa & LPA_100FULL) || (mii_lpa & 0x01C0) == LPA_10FULL)
715 ep->mii.full_duplex = 1;
716 else if (! (mii_lpa & LPA_LPACK))
717 mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
718 if (debug > 1)
719 printk(KERN_INFO "%s: Setting %s-duplex based on MII xcvr %d"
720 " register read of %4.4x.\n", dev->name,
721 ep->mii.full_duplex ? "full" : "half",
722 ep->phys[0], mii_lpa);
723 }
724 }
725
726 ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79);
727 ew32(PRxCDAR, ep->rx_ring_dma);
728 ew32(PTxCDAR, ep->tx_ring_dma);
729
730 /* Start the chip's Rx process. */
731 set_rx_mode(dev);
aae9bc30 732 ew32(COMMAND, StartRx | RxQueued);
733
734 netif_start_queue(dev);
735
736 /* Enable interrupts by setting the interrupt mask. */
737 ew32(INTMASK, RxError | RxHeader | EpicNapiEvent | CntFull |
738 ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) |
739 TxUnderrun);
740
741 if (debug > 1) {
742 printk(KERN_DEBUG "%s: epic_open() ioaddr %p IRQ %d "
743 "status %4.4x %s-duplex.\n",
744 dev->name, ioaddr, irq, er32(GENCTL),
745 ep->mii.full_duplex ? "full" : "half");
746 }
747
748 /* Set the timer to switch to check for link beat and perhaps switch
749 to an alternate media type. */
750 init_timer(&ep->timer);
751 ep->timer.expires = jiffies + 3*HZ;
752 ep->timer.data = (unsigned long)dev;
c061b18d 753 ep->timer.function = epic_timer; /* timer handler */
754 add_timer(&ep->timer);
755
aae9bc30 756 return rc;
757}
758
759/* Reset the chip to recover from a PCI transaction error.
760 This may occur at interrupt time. */
761static void epic_pause(struct net_device *dev)
762{
763 struct net_device_stats *stats = &dev->stats;
764 struct epic_private *ep = netdev_priv(dev);
765 void __iomem *ioaddr = ep->ioaddr;
766
767 netif_stop_queue (dev);
f3b197ac 768
1da177e4 769 /* Disable interrupts by clearing the interrupt mask. */
aae9bc30 770 ew32(INTMASK, 0x00000000);
1da177e4 771 /* Stop the chip's Tx and Rx DMA processes. */
aae9bc30 772 ew16(COMMAND, StopRx | StopTxDMA | StopRxDMA);
773
774 /* Update the error counts. */
775 if (er16(COMMAND) != 0xffff) {
776 stats->rx_missed_errors += er8(MPCNT);
777 stats->rx_frame_errors += er8(ALICNT);
778 stats->rx_crc_errors += er8(CRCCNT);
779 }
780
781 /* Remove the packets on the Rx queue. */
782 epic_rx(dev, RX_RING_SIZE);
783}
784
785static void epic_restart(struct net_device *dev)
786{
4cf1653a 787 struct epic_private *ep = netdev_priv(dev);
aae9bc30 788 void __iomem *ioaddr = ep->ioaddr;
789 int i;
790
791 /* Soft reset the chip. */
aae9bc30 792 ew32(GENCTL, 0x4001);
793
794 printk(KERN_DEBUG "%s: Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
795 dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
796 udelay(1);
797
798 /* This magic is documented in SMSC app note 7.15 */
799 for (i = 16; i > 0; i--)
aae9bc30 800 ew32(TEST1, 0x0008);
1da177e4 801
9e2d11b9 802#ifdef __BIG_ENDIAN
aae9bc30 803 ew32(GENCTL, 0x0432 | (RX_FIFO_THRESH << 8));
1da177e4 804#else
aae9bc30 805 ew32(GENCTL, 0x0412 | (RX_FIFO_THRESH << 8));
1da177e4 806#endif
aae9bc30 807 ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12);
1da177e4 808 if (ep->chip_flags & MII_PWRDWN)
aae9bc30 809 ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
810
811 for (i = 0; i < 3; i++)
aae9bc30 812 ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i]));
813
814 ep->tx_threshold = TX_FIFO_THRESH;
815 ew32(TxThresh, ep->tx_threshold);
816 ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79);
817 ew32(PRxCDAR, ep->rx_ring_dma +
818 (ep->cur_rx % RX_RING_SIZE) * sizeof(struct epic_rx_desc));
819 ew32(PTxCDAR, ep->tx_ring_dma +
820 (ep->dirty_tx % TX_RING_SIZE) * sizeof(struct epic_tx_desc));
821
822 /* Start the chip's Rx process. */
823 set_rx_mode(dev);
aae9bc30 824 ew32(COMMAND, StartRx | RxQueued);
825
826 /* Enable interrupts by setting the interrupt mask. */
827 ew32(INTMASK, RxError | RxHeader | EpicNapiEvent | CntFull |
828 ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) |
829 TxUnderrun);
830
831 printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x"
832 " interrupt %4.4x.\n",
aae9bc30 833 dev->name, er32(COMMAND), er32(GENCTL), er32(INTSTAT));
834}
835
836static void check_media(struct net_device *dev)
837{
4cf1653a 838 struct epic_private *ep = netdev_priv(dev);
aae9bc30 839 void __iomem *ioaddr = ep->ioaddr;
840 int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0;
841 int negotiated = mii_lpa & ep->mii.advertising;
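	/* Full duplex if both ends advertise 100baseTx-FD (0x0100), or if
	   10baseT-FD (0x0040) is the only common ability among the 10/100
	   bits covered by 0x01C0. */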
842 int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
843
844 if (ep->mii.force_media)
845 return;
846 if (mii_lpa == 0xffff) /* Bogus read */
847 return;
848 if (ep->mii.full_duplex != duplex) {
849 ep->mii.full_duplex = duplex;
850 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
851 " partner capability of %4.4x.\n", dev->name,
852 ep->mii.full_duplex ? "full" : "half", ep->phys[0], mii_lpa);
aae9bc30 853 ew32(TxCtrl, ep->mii.full_duplex ? 0x7F : 0x79);
854 }
855}
856
857static void epic_timer(unsigned long data)
858{
859 struct net_device *dev = (struct net_device *)data;
4cf1653a 860 struct epic_private *ep = netdev_priv(dev);
aae9bc30 861 void __iomem *ioaddr = ep->ioaddr;
862 int next_tick = 5*HZ;
863
864 if (debug > 3) {
865 printk(KERN_DEBUG "%s: Media monitor tick, Tx status %8.8x.\n",
aae9bc30 866 dev->name, er32(TxSTAT));
1da177e4 867 printk(KERN_DEBUG "%s: Other registers are IntMask %4.4x "
868 "IntStatus %4.4x RxStatus %4.4x.\n", dev->name,
869 er32(INTMASK), er32(INTSTAT), er32(RxSTAT));
870 }
871
872 check_media(dev);
873
874 ep->timer.expires = jiffies + next_tick;
875 add_timer(&ep->timer);
876}
877
878static void epic_tx_timeout(struct net_device *dev)
879{
4cf1653a 880 struct epic_private *ep = netdev_priv(dev);
aae9bc30 881 void __iomem *ioaddr = ep->ioaddr;
882
883 if (debug > 0) {
884 printk(KERN_WARNING "%s: Transmit timeout using MII device, "
aae9bc30 885 "Tx status %4.4x.\n", dev->name, er16(TxSTAT));
886 if (debug > 1) {
887 printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n",
888 dev->name, ep->dirty_tx, ep->cur_tx);
889 }
890 }
aae9bc30 891 if (er16(TxSTAT) & 0x10) { /* Tx FIFO underflow. */
275defc9 892 dev->stats.tx_fifo_errors++;
aae9bc30 893 ew32(COMMAND, RestartTx);
894 } else {
895 epic_restart(dev);
aae9bc30 896 ew32(COMMAND, TxQueued);
897 }
898
1ae5dc34 899 dev->trans_start = jiffies; /* prevent tx timeout */
275defc9 900 dev->stats.tx_errors++;
901 if (!ep->tx_full)
902 netif_wake_queue(dev);
903}
904
905/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
906static void epic_init_ring(struct net_device *dev)
907{
4cf1653a 908 struct epic_private *ep = netdev_priv(dev);
909 int i;
910
911 ep->tx_full = 0;
912 ep->dirty_tx = ep->cur_tx = 0;
913 ep->cur_rx = ep->dirty_rx = 0;
914 ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
915
916 /* Initialize all Rx descriptors. */
917 for (i = 0; i < RX_RING_SIZE; i++) {
918 ep->rx_ring[i].rxstatus = 0;
9ebfd492 919 ep->rx_ring[i].buflength = ep->rx_buf_sz;
f3b197ac 920 ep->rx_ring[i].next = ep->rx_ring_dma +
921 (i+1)*sizeof(struct epic_rx_desc);
922 ep->rx_skbuff[i] = NULL;
923 }
924 /* Mark the last entry as wrapping the ring. */
925 ep->rx_ring[i-1].next = ep->rx_ring_dma;
926
927 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
928 for (i = 0; i < RX_RING_SIZE; i++) {
dae2e9f4 929 struct sk_buff *skb = netdev_alloc_skb(dev, ep->rx_buf_sz + 2);
930 ep->rx_skbuff[i] = skb;
931 if (skb == NULL)
932 break;
1da177e4 933 skb_reserve(skb, 2); /* 16 byte align the IP header. */
f3b197ac 934 ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
689be439 935 skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
9ebfd492 936 ep->rx_ring[i].rxstatus = DescOwn;
937 }
938 ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
939
940 /* The Tx buffer descriptor is filled in as needed, but we
941 do need to clear the ownership bit. */
942 for (i = 0; i < TX_RING_SIZE; i++) {
943 ep->tx_skbuff[i] = NULL;
944 ep->tx_ring[i].txstatus = 0x0000;
f3b197ac 945 ep->tx_ring[i].next = ep->tx_ring_dma +
946 (i+1)*sizeof(struct epic_tx_desc);
947 }
948 ep->tx_ring[i-1].next = ep->tx_ring_dma;
949}
950
61357325 951static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
1da177e4 952{
4cf1653a 953 struct epic_private *ep = netdev_priv(dev);
aae9bc30 954 void __iomem *ioaddr = ep->ioaddr;
955 int entry, free_count;
956 u32 ctrl_word;
957 unsigned long flags;
f3b197ac 958
5b057c6b 959 if (skb_padto(skb, ETH_ZLEN))
6ed10654 960 return NETDEV_TX_OK;
961
962 /* Caution: the write order is important here, set the field with the
963 "ownership" bit last. */
964
965 /* Calculate the next Tx descriptor entry. */
966 spin_lock_irqsave(&ep->lock, flags);
967 free_count = ep->cur_tx - ep->dirty_tx;
968 entry = ep->cur_tx % TX_RING_SIZE;
969
970 ep->tx_skbuff[entry] = skb;
f3b197ac 971 ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
972 skb->len, PCI_DMA_TODEVICE);
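	/* Interrupt mitigation: most descriptors are queued with "no Tx-done
	   interrupt" and reclaimed lazily by epic_tx(); an interrupt is only
	   requested at the TX_QUEUE_LEN/2 fill mark and when the ring is about
	   to fill, so one completion IRQ covers a whole batch of packets. */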
973 if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
9ebfd492 974 ctrl_word = 0x100000; /* No interrupt */
1da177e4 975 } else if (free_count == TX_QUEUE_LEN/2) {
9ebfd492 976 ctrl_word = 0x140000; /* Tx-done intr. */
1da177e4 977 } else if (free_count < TX_QUEUE_LEN - 1) {
9ebfd492 978 ctrl_word = 0x100000; /* No Tx-done intr. */
979 } else {
980 /* Leave room for an additional entry. */
9ebfd492 981 ctrl_word = 0x140000; /* Tx-done intr. */
982 ep->tx_full = 1;
983 }
9ebfd492 984 ep->tx_ring[entry].buflength = ctrl_word | skb->len;
985 ep->tx_ring[entry].txstatus =
986 ((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
9ebfd492 987 | DescOwn;
988
989 ep->cur_tx++;
990 if (ep->tx_full)
991 netif_stop_queue(dev);
992
993 spin_unlock_irqrestore(&ep->lock, flags);
994 /* Trigger an immediate transmit demand. */
aae9bc30 995 ew32(COMMAND, TxQueued);
1da177e4 996
997 if (debug > 4)
998 printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
999 "flag %2.2x Tx status %8.8x.\n", dev->name, skb->len,
1000 entry, ctrl_word, er32(TxSTAT));
1da177e4 1001
6ed10654 1002 return NETDEV_TX_OK;
1003}
1004
1005static void epic_tx_error(struct net_device *dev, struct epic_private *ep,
1006 int status)
1007{
275defc9 1008 struct net_device_stats *stats = &dev->stats;
1009
1010#ifndef final_version
1011 /* There was a major error, log it. */
1012 if (debug > 1)
1013 printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
1014 dev->name, status);
1015#endif
1016 stats->tx_errors++;
1017 if (status & 0x1050)
1018 stats->tx_aborted_errors++;
1019 if (status & 0x0008)
1020 stats->tx_carrier_errors++;
1021 if (status & 0x0040)
1022 stats->tx_window_errors++;
1023 if (status & 0x0010)
1024 stats->tx_fifo_errors++;
1025}
1026
1027static void epic_tx(struct net_device *dev, struct epic_private *ep)
1028{
1029 unsigned int dirty_tx, cur_tx;
1030
1031 /*
1032 * Note: if this lock becomes a problem we can narrow the locked
1033 * region at the cost of occasionally grabbing the lock more times.
1034 */
1035 cur_tx = ep->cur_tx;
1036 for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) {
1037 struct sk_buff *skb;
1038 int entry = dirty_tx % TX_RING_SIZE;
9ebfd492 1039 int txstatus = ep->tx_ring[entry].txstatus;
1040
1041 if (txstatus & DescOwn)
1042 break; /* It still hasn't been Txed */
1043
1044 if (likely(txstatus & 0x0001)) {
1045 dev->stats.collisions += (txstatus >> 8) & 15;
1046 dev->stats.tx_packets++;
1047 dev->stats.tx_bytes += ep->tx_skbuff[entry]->len;
1048 } else
1049 epic_tx_error(dev, ep, txstatus);
1050
1051 /* Free the original skb. */
1052 skb = ep->tx_skbuff[entry];
f3b197ac 1053 pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
1054 skb->len, PCI_DMA_TODEVICE);
1055 dev_kfree_skb_irq(skb);
1056 ep->tx_skbuff[entry] = NULL;
1057 }
1058
1059#ifndef final_version
1060 if (cur_tx - dirty_tx > TX_RING_SIZE) {
1061 printk(KERN_WARNING
1062 "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
1063 dev->name, dirty_tx, cur_tx, ep->tx_full);
1064 dirty_tx += TX_RING_SIZE;
1065 }
1066#endif
1067 ep->dirty_tx = dirty_tx;
1068 if (ep->tx_full && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
1069 /* The ring is no longer full, allow new TX entries. */
1070 ep->tx_full = 0;
1071 netif_wake_queue(dev);
1072 }
1073}
1074
1075/* The interrupt handler does all of the Rx thread work and cleans up
1076 after the Tx thread. */
7d12e780 1077static irqreturn_t epic_interrupt(int irq, void *dev_instance)
1078{
1079 struct net_device *dev = dev_instance;
4cf1653a 1080 struct epic_private *ep = netdev_priv(dev);
aae9bc30 1081 void __iomem *ioaddr = ep->ioaddr;
1082 unsigned int handled = 0;
1083 int status;
1084
aae9bc30 1085 status = er32(INTSTAT);
1da177e4 1086 /* Acknowledge all of the current interrupt sources ASAP. */
aae9bc30 1087 ew32(INTSTAT, status & EpicNormalEvent);
1088
1089 if (debug > 4) {
1090 printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new "
aae9bc30 1091 "intstat=%#8.8x.\n", dev->name, status, er32(INTSTAT));
1092 }
1093
1094 if ((status & IntrSummary) == 0)
1095 goto out;
1096
1097 handled = 1;
1098
1099 if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) {
1100 spin_lock(&ep->napi_lock);
288379f0 1101 if (napi_schedule_prep(&ep->napi)) {
1da177e4 1102 epic_napi_irq_off(dev, ep);
288379f0 1103 __napi_schedule(&ep->napi);
1104 } else
1105 ep->reschedule_in_poll++;
1106 spin_unlock(&ep->napi_lock);
1107 }
1108 status &= ~EpicNapiEvent;
1109
1110 /* Check uncommon events all at once. */
1111 if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) {
1112 struct net_device_stats *stats = &dev->stats;
1113
1114 if (status == EpicRemoved)
1115 goto out;
1116
1117 /* Always update the error counts to avoid overhead later. */
1118 stats->rx_missed_errors += er8(MPCNT);
1119 stats->rx_frame_errors += er8(ALICNT);
1120 stats->rx_crc_errors += er8(CRCCNT);
1121
1122 if (status & TxUnderrun) { /* Tx FIFO underflow. */
1123 stats->tx_fifo_errors++;
1124 ew32(TxThresh, ep->tx_threshold += 128);
1da177e4 1125 /* Restart the transmit process. */
aae9bc30 1126 ew32(COMMAND, RestartTx);
1127 }
1128 if (status & PCIBusErr170) {
1129 printk(KERN_ERR "%s: PCI Bus Error! status %4.4x.\n",
1130 dev->name, status);
1131 epic_pause(dev);
1132 epic_restart(dev);
1133 }
1134 /* Clear all error sources. */
aae9bc30 1135 ew32(INTSTAT, status & 0x7f18);
1136 }
1137
1138out:
1139 if (debug > 3) {
1140 printk(KERN_DEBUG "%s: exit interrupt, intr_status=%#4.4x.\n",
1141 dev->name, status);
1142 }
1143
1144 return IRQ_RETVAL(handled);
1145}
1146
1147static int epic_rx(struct net_device *dev, int budget)
1148{
4cf1653a 1149 struct epic_private *ep = netdev_priv(dev);
1150 int entry = ep->cur_rx % RX_RING_SIZE;
1151 int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx;
1152 int work_done = 0;
1153
1154 if (debug > 4)
1155 printk(KERN_DEBUG " In epic_rx(), entry %d %8.8x.\n", entry,
1156 ep->rx_ring[entry].rxstatus);
1157
1158 if (rx_work_limit > budget)
1159 rx_work_limit = budget;
1160
1161 /* If we own the next entry, it's a new packet. Send it up. */
1162 while ((ep->rx_ring[entry].rxstatus & DescOwn) == 0) {
1163 int status = ep->rx_ring[entry].rxstatus;
1164
1165 if (debug > 4)
1166 printk(KERN_DEBUG " epic_rx() status was %8.8x.\n", status);
1167 if (--rx_work_limit < 0)
1168 break;
1169 if (status & 0x2006) {
1170 if (debug > 2)
1171 printk(KERN_DEBUG "%s: epic_rx() error status was %8.8x.\n",
1172 dev->name, status);
1173 if (status & 0x2000) {
1174 printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
1175 "multiple buffers, status %4.4x!\n", dev->name, status);
275defc9 1176 dev->stats.rx_length_errors++;
1177 } else if (status & 0x0006)
1178 /* Rx Frame errors are counted in hardware. */
275defc9 1179 dev->stats.rx_errors++;
1180 } else {
1181 /* Malloc up new buffer, compatible with net-2e. */
1182 /* Omit the four octet CRC from the length. */
1183 short pkt_len = (status >> 16) - 4;
1184 struct sk_buff *skb;
1185
1186 if (pkt_len > PKT_BUF_SZ - 4) {
1187 printk(KERN_ERR "%s: Oversized Ethernet frame, status %x "
1188 "%d bytes.\n",
1189 dev->name, status, pkt_len);
1190 pkt_len = 1514;
1191 }
1192 /* Check if the packet is long enough to accept without copying
1193 to a minimally-sized skbuff. */
8e95a202 1194 if (pkt_len < rx_copybreak &&
dae2e9f4 1195 (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
1196 skb_reserve(skb, 2); /* 16 byte align the IP header */
1197 pci_dma_sync_single_for_cpu(ep->pci_dev,
1198 ep->rx_ring[entry].bufaddr,
1199 ep->rx_buf_sz,
1200 PCI_DMA_FROMDEVICE);
8c7b7faa 1201 skb_copy_to_linear_data(skb, ep->rx_skbuff[entry]->data, pkt_len);
1202 skb_put(skb, pkt_len);
1203 pci_dma_sync_single_for_device(ep->pci_dev,
1204 ep->rx_ring[entry].bufaddr,
1205 ep->rx_buf_sz,
1206 PCI_DMA_FROMDEVICE);
1207 } else {
1208 pci_unmap_single(ep->pci_dev,
1209 ep->rx_ring[entry].bufaddr,
1210 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
1211 skb_put(skb = ep->rx_skbuff[entry], pkt_len);
1212 ep->rx_skbuff[entry] = NULL;
1213 }
1214 skb->protocol = eth_type_trans(skb, dev);
1215 netif_receive_skb(skb);
1216 dev->stats.rx_packets++;
1217 dev->stats.rx_bytes += pkt_len;
1218 }
1219 work_done++;
1220 entry = (++ep->cur_rx) % RX_RING_SIZE;
1221 }
1222
1223 /* Refill the Rx ring buffers. */
1224 for (; ep->cur_rx - ep->dirty_rx > 0; ep->dirty_rx++) {
1225 entry = ep->dirty_rx % RX_RING_SIZE;
1226 if (ep->rx_skbuff[entry] == NULL) {
1227 struct sk_buff *skb;
dae2e9f4 1228 skb = ep->rx_skbuff[entry] = netdev_alloc_skb(dev, ep->rx_buf_sz + 2);
1229 if (skb == NULL)
1230 break;
1da177e4 1231 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
f3b197ac 1232 ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev,
689be439 1233 skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
1234 work_done++;
1235 }
1236 /* AV: shouldn't we add a barrier here? */
1237 ep->rx_ring[entry].rxstatus = DescOwn;
1238 }
1239 return work_done;
1240}
1241
1242static void epic_rx_err(struct net_device *dev, struct epic_private *ep)
1243{
aae9bc30 1244 void __iomem *ioaddr = ep->ioaddr;
1245 int status;
1246
aae9bc30 1247 status = er32(INTSTAT);
1248
1249 if (status == EpicRemoved)
1250 return;
1251 if (status & RxOverflow) /* Missed a Rx frame. */
275defc9 1252 dev->stats.rx_errors++;
1da177e4 1253 if (status & (RxOverflow | RxFull))
aae9bc30 1254 ew16(COMMAND, RxQueued);
1255}
1256
bea3348e 1257static int epic_poll(struct napi_struct *napi, int budget)
1da177e4 1258{
1259 struct epic_private *ep = container_of(napi, struct epic_private, napi);
1260 struct net_device *dev = ep->mii.dev;
1261 int work_done = 0;
aae9bc30 1262 void __iomem *ioaddr = ep->ioaddr;
1da177e4 1263
1264rx_action:
1265
1266 epic_tx(dev, ep);
1267
bea3348e 1268 work_done += epic_rx(dev, budget);
1269
1270 epic_rx_err(dev, ep);
1271
4ec24119 1272 if (work_done < budget) {
1273 unsigned long flags;
1274 int more;
1275
1276 /* A bit baroque but it avoids a (space hungry) spin_unlock */
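		/* If epic_interrupt() saw new NAPI events while this poll was
		   already scheduled, it bumped reschedule_in_poll instead of
		   rescheduling. Loop again in that case, so nothing is lost in
		   the window between completing and re-enabling the NAPI
		   interrupt sources. */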
1277
1278 spin_lock_irqsave(&ep->napi_lock, flags);
1279
1280 more = ep->reschedule_in_poll;
1281 if (!more) {
288379f0 1282 __napi_complete(napi);
aae9bc30 1283 ew32(INTSTAT, EpicNapiEvent);
1284 epic_napi_irq_on(dev, ep);
1285 } else
1286 ep->reschedule_in_poll--;
1287
1288 spin_unlock_irqrestore(&ep->napi_lock, flags);
1289
1290 if (more)
1291 goto rx_action;
1292 }
1293
bea3348e 1294 return work_done;
1295}
1296
1297static int epic_close(struct net_device *dev)
1298{
4cf1653a 1299 struct epic_private *ep = netdev_priv(dev);
1300 struct pci_dev *pdev = ep->pci_dev;
1301 void __iomem *ioaddr = ep->ioaddr;
1302 struct sk_buff *skb;
1303 int i;
1304
1305 netif_stop_queue(dev);
bea3348e 1306 napi_disable(&ep->napi);
1307
1308 if (debug > 1)
1309 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
aae9bc30 1310 dev->name, er32(INTSTAT));
1311
1312 del_timer_sync(&ep->timer);
1313
1314 epic_disable_int(dev, ep);
1315
aae9bc30 1316 free_irq(pdev->irq, dev);
1317
1318 epic_pause(dev);
1319
1320 /* Free all the skbuffs in the Rx queue. */
1321 for (i = 0; i < RX_RING_SIZE; i++) {
1322 skb = ep->rx_skbuff[i];
1323 ep->rx_skbuff[i] = NULL;
1324 ep->rx_ring[i].rxstatus = 0; /* Not owned by Epic chip. */
1325 ep->rx_ring[i].buflength = 0;
1326 if (skb) {
aae9bc30 1327 pci_unmap_single(pdev, ep->rx_ring[i].bufaddr,
1328 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
1329 dev_kfree_skb(skb);
1330 }
1331 ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */
1332 }
1333 for (i = 0; i < TX_RING_SIZE; i++) {
1334 skb = ep->tx_skbuff[i];
1335 ep->tx_skbuff[i] = NULL;
1336 if (!skb)
1337 continue;
1338 pci_unmap_single(pdev, ep->tx_ring[i].bufaddr, skb->len,
1339 PCI_DMA_TODEVICE);
1340 dev_kfree_skb(skb);
1341 }
1342
1343 /* Green! Leave the chip in low-power mode. */
aae9bc30 1344 ew32(GENCTL, 0x0008);
1345
1346 return 0;
1347}
1348
1349static struct net_device_stats *epic_get_stats(struct net_device *dev)
1350{
1351 struct epic_private *ep = netdev_priv(dev);
1352 void __iomem *ioaddr = ep->ioaddr;
1353
1354 if (netif_running(dev)) {
1355 struct net_device_stats *stats = &dev->stats;
1356
1357 stats->rx_missed_errors += er8(MPCNT);
1358 stats->rx_frame_errors += er8(ALICNT);
1359 stats->rx_crc_errors += er8(CRCCNT);
1360 }
1361
275defc9 1362 return &dev->stats;
1363}
1364
1365/* Set or clear the multicast filter for this adaptor.
1366 Note that we only use exclusion around actually queueing the
1367 new frame, not around filling ep->setup_frame. This is non-deterministic
1368 when re-entered but still correct. */
1369
1370static void set_rx_mode(struct net_device *dev)
1371{
4cf1653a 1372 struct epic_private *ep = netdev_priv(dev);
aae9bc30 1373 void __iomem *ioaddr = ep->ioaddr;
1374 unsigned char mc_filter[8]; /* Multicast hash filter */
1375 int i;
1376
1377 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
aae9bc30 1378 ew32(RxCtrl, 0x002c);
1da177e4 1379 /* Unconditionally log net taps. */
1da177e4 1380 memset(mc_filter, 0xff, sizeof(mc_filter));
4cd24eaf 1381 } else if ((!netdev_mc_empty(dev)) || (dev->flags & IFF_ALLMULTI)) {
1382 /* There is apparently a chip bug, so the multicast filter
1383 is never enabled. */
1384 /* Too many to filter perfectly -- accept all multicasts. */
1385 memset(mc_filter, 0xff, sizeof(mc_filter));
aae9bc30 1386 ew32(RxCtrl, 0x000c);
4cd24eaf 1387 } else if (netdev_mc_empty(dev)) {
aae9bc30 1388 ew32(RxCtrl, 0x0004);
1389 return;
1390 } else { /* Never executed, for now. */
22bedad3 1391 struct netdev_hw_addr *ha;
1392
1393 memset(mc_filter, 0, sizeof(mc_filter));
22bedad3 1394 netdev_for_each_mc_addr(ha, dev) {
1da177e4 1395 unsigned int bit_nr =
22bedad3 1396 ether_crc_le(ETH_ALEN, ha->addr) & 0x3f;
1397 mc_filter[bit_nr >> 3] |= (1 << (bit_nr & 7));
1398 }
1399 }
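	/* 64-bin hash filter: the low 6 bits of the little-endian CRC-32 of
	   each multicast address select one of the 64 bits spread across the
	   MC0-MC3 registers (byte bit_nr>>3, bit bit_nr&7 of mc_filter[]). */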
1400 /* ToDo: perhaps we need to stop the Tx and Rx process here? */
1401 if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) {
1402 for (i = 0; i < 4; i++)
aae9bc30 1403 ew16(MC0 + i*4, ((u16 *)mc_filter)[i]);
1404 memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
1405 }
1406}
1407
1408static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1409{
4cf1653a 1410 struct epic_private *np = netdev_priv(dev);
1da177e4 1411
1412 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1413 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1414 strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1415}
1416
1417static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1418{
4cf1653a 1419 struct epic_private *np = netdev_priv(dev);
1420 int rc;
1421
1422 spin_lock_irq(&np->lock);
1423 rc = mii_ethtool_gset(&np->mii, cmd);
1424 spin_unlock_irq(&np->lock);
1425
1426 return rc;
1427}
1428
1429static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1430{
4cf1653a 1431 struct epic_private *np = netdev_priv(dev);
1432 int rc;
1433
1434 spin_lock_irq(&np->lock);
1435 rc = mii_ethtool_sset(&np->mii, cmd);
1436 spin_unlock_irq(&np->lock);
1437
1438 return rc;
1439}
1440
1441static int netdev_nway_reset(struct net_device *dev)
1442{
4cf1653a 1443 struct epic_private *np = netdev_priv(dev);
1444 return mii_nway_restart(&np->mii);
1445}
1446
1447static u32 netdev_get_link(struct net_device *dev)
1448{
4cf1653a 1449 struct epic_private *np = netdev_priv(dev);
1450 return mii_link_ok(&np->mii);
1451}
1452
1453static u32 netdev_get_msglevel(struct net_device *dev)
1454{
1455 return debug;
1456}
1457
1458static void netdev_set_msglevel(struct net_device *dev, u32 value)
1459{
1460 debug = value;
1461}
1462
1463static int ethtool_begin(struct net_device *dev)
1464{
1465 struct epic_private *ep = netdev_priv(dev);
1466 void __iomem *ioaddr = ep->ioaddr;
1467
1da177e4 1468 /* power-up, if interface is down */
1469 if (!netif_running(dev)) {
1470 ew32(GENCTL, 0x0200);
1471 ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
1472 }
1473 return 0;
1474}
1475
1476static void ethtool_complete(struct net_device *dev)
1477{
1478 struct epic_private *ep = netdev_priv(dev);
1479 void __iomem *ioaddr = ep->ioaddr;
1480
1da177e4 1481 /* power-down, if interface is down */
1482 if (!netif_running(dev)) {
1483 ew32(GENCTL, 0x0008);
1484 ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000);
1485 }
1486}
1487
7282d491 1488static const struct ethtool_ops netdev_ethtool_ops = {
1489 .get_drvinfo = netdev_get_drvinfo,
1490 .get_settings = netdev_get_settings,
1491 .set_settings = netdev_set_settings,
1492 .nway_reset = netdev_nway_reset,
1493 .get_link = netdev_get_link,
1494 .get_msglevel = netdev_get_msglevel,
1495 .set_msglevel = netdev_set_msglevel,
1496 .begin = ethtool_begin,
1497 .complete = ethtool_complete
1498};
1499
1500static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1501{
4cf1653a 1502 struct epic_private *np = netdev_priv(dev);
aae9bc30 1503 void __iomem *ioaddr = np->ioaddr;
1504 struct mii_ioctl_data *data = if_mii(rq);
1505 int rc;
1506
1507 /* power-up, if interface is down */
1508 if (! netif_running(dev)) {
1509 ew32(GENCTL, 0x0200);
1510 ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
1511 }
1512
1513 /* all non-ethtool ioctls (the SIOC[GS]MIIxxx ioctls) */
1514 spin_lock_irq(&np->lock);
1515 rc = generic_mii_ioctl(&np->mii, data, cmd, NULL);
1516 spin_unlock_irq(&np->lock);
1517
1518 /* power-down, if interface is down */
1519 if (! netif_running(dev)) {
1520 ew32(GENCTL, 0x0008);
1521 ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000);
1522 }
1523 return rc;
1524}
1525
1526
aae9bc30 1527static void __devexit epic_remove_one(struct pci_dev *pdev)
1528{
1529 struct net_device *dev = pci_get_drvdata(pdev);
4cf1653a 1530 struct epic_private *ep = netdev_priv(dev);
f3b197ac 1531
1532 pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
1533 pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
1534 unregister_netdev(dev);
aae9bc30 1535 pci_iounmap(pdev, ep->ioaddr);
1536 pci_release_regions(pdev);
1537 free_netdev(dev);
1538 pci_disable_device(pdev);
1539 pci_set_drvdata(pdev, NULL);
1540 /* pci_power_off(pdev, -1); */
1541}
1542
1543
1544#ifdef CONFIG_PM
1545
1546static int epic_suspend (struct pci_dev *pdev, pm_message_t state)
1547{
1548 struct net_device *dev = pci_get_drvdata(pdev);
1549 struct epic_private *ep = netdev_priv(dev);
1550 void __iomem *ioaddr = ep->ioaddr;
1551
1552 if (!netif_running(dev))
1553 return 0;
1554 epic_pause(dev);
1555 /* Put the chip into low-power mode. */
aae9bc30 1556 ew32(GENCTL, 0x0008);
1557 /* pci_power_off(pdev, -1); */
1558 return 0;
1559}
1560
1561
1562static int epic_resume (struct pci_dev *pdev)
1563{
1564 struct net_device *dev = pci_get_drvdata(pdev);
1565
1566 if (!netif_running(dev))
1567 return 0;
1568 epic_restart(dev);
1569 /* pci_power_on(pdev); */
1570 return 0;
1571}
1572
1573#endif /* CONFIG_PM */
1574
1575
1576static struct pci_driver epic_driver = {
1577 .name = DRV_NAME,
1578 .id_table = epic_pci_tbl,
1579 .probe = epic_init_one,
1580 .remove = __devexit_p(epic_remove_one),
1581#ifdef CONFIG_PM
1582 .suspend = epic_suspend,
1583 .resume = epic_resume,
1584#endif /* CONFIG_PM */
1585};
1586
1587
1588static int __init epic_init (void)
1589{
1590/* when a module, this is printed whether or not devices are found in probe */
1591#ifdef MODULE
ad361c98 1592 printk (KERN_INFO "%s%s",
2c2a8c53 1593 version, version2);
1594#endif
1595
29917620 1596 return pci_register_driver(&epic_driver);
1597}
1598
1599
1600static void __exit epic_cleanup (void)
1601{
1602 pci_unregister_driver (&epic_driver);
1603}
1604
1605
1606module_init(epic_init);
1607module_exit(epic_cleanup);