/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
/*
	Written 1998-2001 by Donald Becker.

	Current Maintainer: Roger Luethi <rl@hellgate.ch>

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice. This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	This driver is designed for the VIA VT86C100A Rhine-I.
	It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
	and management NIC 6105M).

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403


	This driver contains some changes from the original Donald Becker
	version. He may or may not be interested in bug reports on this
	code. You can find his versions at:
	http://www.scyld.com/network/via-rhine.html
	[link no longer provides useful info -jgarzik]

*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"via-rhine"
#define DRV_VERSION	"1.5.0"
#define DRV_RELDATE	"2010-10-09"

#include <linux/types.h>

/* A few user-configurable values.
   These may be modified when a driver module is loaded. */

#define DEBUG
static int debug = 1;	/* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
	defined(CONFIG_SPARC) || defined(__ia64__) || \
	defined(__sh__) || defined(__mips__)
static int rx_copybreak = 1518;
#else
static int rx_copybreak;
#endif

/* Work-around for broken BIOSes: they are unable to get the chip back out of
   power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
static bool avoid_D3;

/*
 * In case you are looking for 'options[]' or 'full_duplex[]', they
 * are gone. Use ethtool(8) instead.
 */

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Rhine has a 64 element 8390-like hash table. */
static const int multicast_filter_limit = 32;


/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE	16
#define TX_QUEUE_LEN	10	/* Limit ring entries actually used. */
#define RX_RING_SIZE	64
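
/*
 * Editor's note (added): because the ring sizes above are powers of two,
 * index arithmetic such as
 *	entry = rp->cur_tx % TX_RING_SIZE;
 * compiles down to a simple mask, entry = rp->cur_tx & (TX_RING_SIZE - 1),
 * which is the "compile efficiency" the comment above refers to.
 */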

/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2*HZ)

#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer.*/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/if_vlan.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/dmi.h>

/* These identify the driver base version and may not be removed. */
static const char version[] __devinitconst =
	"v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";

/* This driver was written to use PCI memory space. Some early versions
   of the Rhine may only work correctly with I/O space accesses. */
#ifdef CONFIG_VIA_RHINE_MMIO
#define USE_MMIO
#else
#endif

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
MODULE_LICENSE("GPL");

module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(avoid_D3, bool, 0);
MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");

#define MCAM_SIZE	32
#define VCAM_SIZE	32

/*
Theory of Operation

I. Board Compatibility

This driver is designed for the VIA 86c100A Rhine-II PCI Fast Ethernet
controller.

II. Board-specific settings

Boards with this chip are functional only in a bus-master PCI slot.

Many operational settings are loaded from the EEPROM to the Config word at
offset 0x78. For most of these settings, this driver assumes that they are
correct.
If this driver is compiled to use PCI memory space operations the EEPROM
must be configured to enable memory ops.

III. Driver operation

IIIa. Ring buffers

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.

IIIb/c. Transmit/Receive Structure

This driver attempts to use a zero-copy receive and transmit scheme.

Alas, all data buffers are required to start on a 32 bit boundary, so
the driver must often copy transmit packets into bounce buffers.

The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack. Buffers consumed this way are replaced by newly allocated
skbuffs in the last phase of rhine_rx().

The RX_COPYBREAK value is chosen to trade-off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames. New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets. When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine. Copying also preloads the cache, which is
most useful with small frames.

Since the VIA chips are only able to transfer data to buffers on 32 bit
boundaries, the IP header at offset 14 in an ethernet frame isn't
longword aligned for further processing. Copying these unaligned buffers
has the beneficial effect of 16-byte aligning the IP header.

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control. One
is the send-packet routine, which enforces single-threaded use by the
netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
which is single threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring. It locks the
netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
the ring is not available it stops the transmit queue by
calling netif_stop_queue.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring. After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. If at least half of the entries in
the Tx ring are available the transmit queue is woken up if it was stopped.

IV. Notes

IVb. References

Preliminary VT86C100A manual from http://www.via.com.tw/
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF


IVc. Errata

The VT86C100A manual is not reliable information.
The 3043 chip does not handle unaligned transmit or receive buffers, resulting
in significant performance degradation for bounce buffer copies on transmit
and unaligned IP headers on receive.
The chip does not pad to minimum transmit length.

*/
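
/*
 * Illustrative sketch (added in editing, not part of the original driver):
 * the copybreak policy described in IIIb/c above reduces to a single test
 * in rhine_rx(). Frames shorter than rx_copybreak are copied into a fresh,
 * minimally sized skb so the full-size ring buffer can be reused; anything
 * larger surrenders its ring skb to the stack and a replacement is
 * allocated later. A hypothetical helper capturing the decision:
 */
static inline bool rhine_rx_should_copy(int pkt_len)
{
	return pkt_len < rx_copybreak;	/* 0 on most arches: never copy */
}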


/* This table drives the PCI probe routines. It's mostly boilerplate in all
   of the drivers, and will likely be provided by some future kernel.
   Note the matching code -- the first table entry matches all 56** cards but
   the second only the 1234 card.
*/

enum rhine_revs {
	VT86C100A	= 0x00,
	VTunknown0	= 0x20,
	VT6102		= 0x40,
	VT8231		= 0x50,	/* Integrated MAC */
	VT8233		= 0x60,	/* Integrated MAC */
	VT8235		= 0x74,	/* Integrated MAC */
	VT8237		= 0x78,	/* Integrated MAC */
	VTunknown1	= 0x7C,
	VT6105		= 0x80,
	VT6105_B0	= 0x83,
	VT6105L		= 0x8A,
	VT6107		= 0x8C,
	VTunknown2	= 0x8E,
	VT6105M		= 0x90,	/* Management adapter */
};

enum rhine_quirks {
	rqWOL		= 0x0001,	/* Wake-On-LAN support */
	rqForceReset	= 0x0002,
	rq6patterns	= 0x0040,	/* 6 instead of 4 patterns for WOL */
	rqStatusWBRace	= 0x0080,	/* Tx Status Writeback Error possible */
	rqRhineI	= 0x0100,	/* See comment below */
};
/*
 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
 * MMIO as well as for the collision counter and the Tx FIFO underflow
 * indicator. In addition, Tx and Rx buffers need to be 4-byte aligned.
 */

/* Beware of PCI posted writes */
#define IOSYNC	do { ioread8(ioaddr + StationAddr); } while (0)
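
/*
 * Editor's note (added): the dummy ioread8() above forces any posted PCI
 * writes ahead of it to actually reach the chip before we proceed;
 * reading StationAddr is side-effect free, so it is a safe flush point.
 */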

static DEFINE_PCI_DEVICE_TABLE(rhine_pci_tbl) = {
	{ 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, },	/* VT86C100A */
	{ 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6102 */
	{ 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, },	/* 6105{,L,LOM} */
	{ 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, },	/* VT6105M */
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);


/* Offsets to the device registers. */
enum register_offsets {
	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
	ChipCmd1=0x09, TQWake=0x0A,
	IntrStatus=0x0C, IntrEnable=0x0E,
	MulticastFilter0=0x10, MulticastFilter1=0x14,
	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
	StickyHW=0x83, IntrStatus2=0x84,
	CamMask=0x88, CamCon=0x92, CamAddr=0x93,
	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
	WOLcrClr1=0xA6, WOLcgClr=0xA7,
	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
};

/* Bits in ConfigD */
enum backoff_bits {
	BackOptional=0x01, BackModify=0x02,
	BackCaptureEffect=0x04, BackRandom=0x08
};

/* Bits in the TxConfig (TCR) register */
enum tcr_bits {
	TCR_PQEN=0x01,
	TCR_LB0=0x02,		/* loopback[0] */
	TCR_LB1=0x04,		/* loopback[1] */
	TCR_OFSET=0x08,
	TCR_RTGOPT=0x10,
	TCR_RTFT0=0x20,
	TCR_RTFT1=0x40,
	TCR_RTSF=0x80,
};

/* Bits in the CamCon (CAMC) register */
enum camcon_bits {
	CAMC_CAMEN=0x01,
	CAMC_VCAMSL=0x02,
	CAMC_CAMWR=0x04,
	CAMC_CAMRD=0x08,
};

/* Bits in the PCIBusConfig1 (BCR1) register */
enum bcr1_bits {
	BCR1_POT0=0x01,
	BCR1_POT1=0x02,
	BCR1_POT2=0x04,
	BCR1_CTFT0=0x08,
	BCR1_CTFT1=0x10,
	BCR1_CTSF=0x20,
	BCR1_TXQNOBK=0x40,	/* for VT6105 */
	BCR1_VIDFR=0x80,	/* for VT6105 */
	BCR1_MED0=0x40,		/* for VT6102 */
	BCR1_MED1=0x80,		/* for VT6102 */
};

#ifdef USE_MMIO
/* Registers we check to verify that MMIO and PIO accesses match. */
static const int mmio_verify_registers[] = {
	RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
	0
};
#endif

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
	IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210,
	IntrPCIErr=0x0040,
	IntrStatsMax=0x0080, IntrRxEarly=0x0100,
	IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
	IntrTxAborted=0x2000, IntrLinkChange=0x4000,
	IntrRxWakeUp=0x8000,
	IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
	IntrTxDescRace=0x080000,	/* mapped from IntrStatus2 */
	IntrTxErrSummary=0x082218,
};

/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
enum wol_bits {
	WOLucast	= 0x10,
	WOLmagic	= 0x20,
	WOLbmcast	= 0x30,
	WOLlnkon	= 0x40,
	WOLlnkoff	= 0x80,
};

/* The Rx and Tx buffer descriptors. */
struct rx_desc {
	__le32 rx_status;
	__le32 desc_length;	/* Chain flag, Buffer/frame length */
	__le32 addr;
	__le32 next_desc;
};
struct tx_desc {
	__le32 tx_status;
	__le32 desc_length;	/* Chain flag, Tx Config, Frame length */
	__le32 addr;
	__le32 next_desc;
};

/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
#define TXDESC		0x00e08000

enum rx_status_bits {
	RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
};

/* Bits in *_desc.*_status */
enum desc_status_bits {
	DescOwn=0x80000000
};

/* Bits in *_desc.*_length */
enum desc_length_bits {
	DescTag=0x00010000
};

/* Bits in ChipCmd. */
enum chip_cmd_bits {
	CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
	CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
	Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
	Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
};

struct rhine_private {
	/* Bit mask for configured VLAN ids */
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];

	/* Descriptor rings */
	struct rx_desc *rx_ring;
	struct tx_desc *tx_ring;
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;

	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];

	/* The saved address of a sent-in-place packet/buffer, for later free(). */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];

	/* Tx bounce buffers (Rhine-I only) */
	unsigned char *tx_buf[TX_RING_SIZE];
	unsigned char *tx_bufs;
	dma_addr_t tx_bufs_dma;

	struct pci_dev *pdev;
	long pioaddr;
	struct net_device *dev;
	struct napi_struct napi;
	spinlock_t lock;
	struct work_struct reset_task;

	/* Frequently used values: keep some adjacent for cache effect. */
	u32 quirks;
	struct rx_desc *rx_head_desc;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int cur_tx, dirty_tx;
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	u8 wolopts;

	u8 tx_thresh, rx_thresh;

	struct mii_if_info mii_if;
	void __iomem *base;
};

#define BYTE_REG_BITS_ON(x, p)      do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
#define WORD_REG_BITS_ON(x, p)      do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
#define DWORD_REG_BITS_ON(x, p)     do { iowrite32((ioread32((p))|(x)), (p)); } while (0)

#define BYTE_REG_BITS_IS_ON(x, p)   (ioread8((p)) & (x))
#define WORD_REG_BITS_IS_ON(x, p)   (ioread16((p)) & (x))
#define DWORD_REG_BITS_IS_ON(x, p)  (ioread32((p)) & (x))

#define BYTE_REG_BITS_OFF(x, p)     do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
#define WORD_REG_BITS_OFF(x, p)     do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
#define DWORD_REG_BITS_OFF(x, p)    do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)

#define BYTE_REG_BITS_SET(x, m, p)  do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
#define WORD_REG_BITS_SET(x, m, p)  do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
#define DWORD_REG_BITS_SET(x, m, p) do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)


static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  rhine_open(struct net_device *dev);
static void rhine_reset_task(struct work_struct *work);
static void rhine_tx_timeout(struct net_device *dev);
static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
				  struct net_device *dev);
static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
static void rhine_tx(struct net_device *dev);
static int rhine_rx(struct net_device *dev, int limit);
static void rhine_error(struct net_device *dev, int intr_status);
static void rhine_set_rx_mode(struct net_device *dev);
static struct net_device_stats *rhine_get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int  rhine_close(struct net_device *dev);
static void rhine_shutdown (struct pci_dev *pdev);
static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr);
static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr);
static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask);
static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask);
static void rhine_init_cam_filter(struct net_device *dev);
static void rhine_update_vcam(struct net_device *dev);

#define RHINE_WAIT_FOR(condition)					\
do {									\
	int i = 1024;							\
	while (!(condition) && --i)					\
		;							\
	if (debug > 1 && i < 512)					\
		pr_info("%4d cycles used @ %s:%d\n",			\
			1024 - i, __func__, __LINE__);			\
} while (0)
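
/*
 * Editor's note (added): RHINE_WAIT_FOR() is a bounded busy-wait, at most
 * 1024 polls of the condition, and at debug > 1 it reports any wait that
 * burned more than half of that budget. There is no failure return; a
 * caller that must know re-checks the condition afterwards (see
 * rhine_chip_reset()).
 */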

static inline u32 get_intr_status(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 intr_status;

	intr_status = ioread16(ioaddr + IntrStatus);
	/* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
	if (rp->quirks & rqStatusWBRace)
		intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
	return intr_status;
}

/*
 * Get power related registers into sane state.
 * Notify user about past WOL event.
 */
static void rhine_power_init(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u16 wolstat;

	if (rp->quirks & rqWOL) {
		/* Make sure chip is in power state D0 */
		iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);

		/* Disable "force PME-enable" */
		iowrite8(0x80, ioaddr + WOLcgClr);

		/* Clear power-event config bits (WOL) */
		iowrite8(0xFF, ioaddr + WOLcrClr);
		/* More recent cards can manage two additional patterns */
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + WOLcrClr1);

		/* Save power-event status bits */
		wolstat = ioread8(ioaddr + PwrcsrSet);
		if (rp->quirks & rq6patterns)
			wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;

		/* Clear power-event status bits */
		iowrite8(0xFF, ioaddr + PwrcsrClr);
		if (rp->quirks & rq6patterns)
			iowrite8(0x03, ioaddr + PwrcsrClr1);

		if (wolstat) {
			char *reason;
			switch (wolstat) {
			case WOLmagic:
				reason = "Magic packet";
				break;
			case WOLlnkon:
				reason = "Link went up";
				break;
			case WOLlnkoff:
				reason = "Link went down";
				break;
			case WOLucast:
				reason = "Unicast packet";
				break;
			case WOLbmcast:
				reason = "Multicast/broadcast packet";
				break;
			default:
				reason = "Unknown";
			}
			netdev_info(dev, "Woke system up. Reason: %s\n",
				    reason);
		}
	}
}

static void rhine_chip_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
	IOSYNC;

	if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
		netdev_info(dev, "Reset not complete yet. Trying harder.\n");

		/* Force reset */
		if (rp->quirks & rqForceReset)
			iowrite8(0x40, ioaddr + MiscCmd);

		/* Reset can take somewhat longer (rare) */
		RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
	}

	if (debug > 1)
		netdev_info(dev, "Reset %s\n",
			    (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) ?
			    "failed" : "succeeded");
}

#ifdef USE_MMIO
static void enable_mmio(long pioaddr, u32 quirks)
{
	int n;
	if (quirks & rqRhineI) {
		/* More recent docs say that this bit is reserved ... */
		n = inb(pioaddr + ConfigA) | 0x20;
		outb(n, pioaddr + ConfigA);
	} else {
		n = inb(pioaddr + ConfigD) | 0x80;
		outb(n, pioaddr + ConfigD);
	}
}
#endif

/*
 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
 * (plus 0x6C for Rhine-I/II)
 */
static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	outb(0x20, pioaddr + MACRegEEcsr);
	RHINE_WAIT_FOR(!(inb(pioaddr + MACRegEEcsr) & 0x20));

#ifdef USE_MMIO
	/*
	 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
	 * MMIO. If reloading EEPROM was done first this could be avoided, but
	 * it is not known if that still works with the "win98-reboot" problem.
	 */
	enable_mmio(pioaddr, rp->quirks);
#endif

	/* Turn off EEPROM-controlled wake-up (magic packet) */
	if (rp->quirks & rqWOL)
		iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);

}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void rhine_poll(struct net_device *dev)
{
	disable_irq(dev->irq);
	rhine_interrupt(dev->irq, (void *)dev);
	enable_irq(dev->irq);
}
#endif

static int rhine_napipoll(struct napi_struct *napi, int budget)
{
	struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
	struct net_device *dev = rp->dev;
	void __iomem *ioaddr = rp->base;
	int work_done;

	work_done = rhine_rx(dev, budget);

	if (work_done < budget) {
		napi_complete(napi);

		iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty | IntrRxOverflow |
			  IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
			  IntrTxDone | IntrTxError | IntrTxUnderrun |
			  IntrPCIErr | IntrStatsMax | IntrLinkChange,
			  ioaddr + IntrEnable);
	}
	return work_done;
}

static void __devinit rhine_hw_init(struct net_device *dev, long pioaddr)
{
	struct rhine_private *rp = netdev_priv(dev);

	/* Reset the chip to erase previous misconfiguration. */
	rhine_chip_reset(dev);

	/* Rhine-I needs extra time to recuperate before EEPROM reload */
	if (rp->quirks & rqRhineI)
		msleep(5);

	/* Reload EEPROM controlled bytes cleared by soft reset */
	rhine_reload_eeprom(pioaddr, dev);
}

static const struct net_device_ops rhine_netdev_ops = {
	.ndo_open		= rhine_open,
	.ndo_stop		= rhine_close,
	.ndo_start_xmit		= rhine_start_tx,
	.ndo_get_stats		= rhine_get_stats,
	.ndo_set_rx_mode	= rhine_set_rx_mode,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_do_ioctl		= netdev_ioctl,
	.ndo_tx_timeout		= rhine_tx_timeout,
	.ndo_vlan_rx_add_vid	= rhine_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= rhine_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= rhine_poll,
#endif
};

static int __devinit rhine_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct rhine_private *rp;
	int i, rc;
	u32 quirks;
	long pioaddr;
	long memaddr;
	void __iomem *ioaddr;
	int io_size, phy_id;
	const char *name;
#ifdef USE_MMIO
	int bar = 1;
#else
	int bar = 0;
#endif

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	pr_info_once("%s\n", version);
#endif

	io_size = 256;
	phy_id = 0;
	quirks = 0;
	name = "Rhine";
	if (pdev->revision < VTunknown0) {
		quirks = rqRhineI;
		io_size = 128;
	}
	else if (pdev->revision >= VT6102) {
		quirks = rqWOL | rqForceReset;
		if (pdev->revision < VT6105) {
			name = "Rhine II";
			quirks |= rqStatusWBRace;	/* Rhine-II exclusive */
		}
		else {
			phy_id = 1;	/* Integrated PHY, phy_id fixed to 1 */
			if (pdev->revision >= VT6105_B0)
				quirks |= rq6patterns;
			if (pdev->revision < VT6105M)
				name = "Rhine III";
			else
				name = "Rhine III (Management Adapter)";
		}
	}

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out;

	/* this should always be supported */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc) {
		dev_err(&pdev->dev,
			"32-bit PCI DMA addresses not supported by the card!?\n");
		goto err_out;
	}

	/* sanity check */
	if ((pci_resource_len(pdev, 0) < io_size) ||
	    (pci_resource_len(pdev, 1) < io_size)) {
		rc = -EIO;
		dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
		goto err_out;
	}

	pioaddr = pci_resource_start(pdev, 0);
	memaddr = pci_resource_start(pdev, 1);

	pci_set_master(pdev);

	dev = alloc_etherdev(sizeof(struct rhine_private));
	if (!dev) {
		rc = -ENOMEM;
		dev_err(&pdev->dev, "alloc_etherdev failed\n");
		goto err_out;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);

	rp = netdev_priv(dev);
	rp->dev = dev;
	rp->quirks = quirks;
	rp->pioaddr = pioaddr;
	rp->pdev = pdev;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_free_netdev;

	ioaddr = pci_iomap(pdev, bar, io_size);
	if (!ioaddr) {
		rc = -EIO;
		dev_err(&pdev->dev,
			"ioremap failed for device %s, region 0x%X @ 0x%lX\n",
			pci_name(pdev), io_size, memaddr);
		goto err_out_free_res;
	}

#ifdef USE_MMIO
	enable_mmio(pioaddr, quirks);

	/* Check that selected MMIO registers match the PIO ones */
	i = 0;
	while (mmio_verify_registers[i]) {
		int reg = mmio_verify_registers[i++];
		unsigned char a = inb(pioaddr+reg);
		unsigned char b = readb(ioaddr+reg);
		if (a != b) {
			rc = -EIO;
			dev_err(&pdev->dev,
				"MMIO do not match PIO [%02x] (%02x != %02x)\n",
				reg, a, b);
			goto err_out_unmap;
		}
	}
#endif /* USE_MMIO */

	dev->base_addr = (unsigned long)ioaddr;
	rp->base = ioaddr;

	/* Get chip registers into a sane state */
	rhine_power_init(dev);
	rhine_hw_init(dev, pioaddr);

	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);

	if (!is_valid_ether_addr(dev->dev_addr)) {
		/* Report it and use a random ethernet address instead */
		netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
		random_ether_addr(dev->dev_addr);
		netdev_info(dev, "Using random MAC address: %pM\n",
			    dev->dev_addr);
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	/* For Rhine-I/II, phy_id is loaded from EEPROM */
	if (!phy_id)
		phy_id = ioread8(ioaddr + 0x6C);

	dev->irq = pdev->irq;

	spin_lock_init(&rp->lock);
	INIT_WORK(&rp->reset_task, rhine_reset_task);

	rp->mii_if.dev = dev;
	rp->mii_if.mdio_read = mdio_read;
	rp->mii_if.mdio_write = mdio_write;
	rp->mii_if.phy_id_mask = 0x1f;
	rp->mii_if.reg_num_mask = 0x1f;

	/* The chip-specific entries in the device structure. */
	dev->netdev_ops = &rhine_netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);

	if (rp->quirks & rqRhineI)
		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;

	if (pdev->revision >= VT6105M)
		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
		NETIF_F_HW_VLAN_FILTER;

	/* dev->name not defined before register_netdev()! */
	rc = register_netdev(dev);
	if (rc)
		goto err_out_unmap;

	netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
		    name,
#ifdef USE_MMIO
		    memaddr,
#else
		    (long)ioaddr,
#endif
		    dev->dev_addr, pdev->irq);

	pci_set_drvdata(pdev, dev);

	{
		u16 mii_cmd;
		int mii_status = mdio_read(dev, phy_id, 1);
		mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
		mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
		if (mii_status != 0xffff && mii_status != 0x0000) {
			rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
			netdev_info(dev,
				    "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
				    phy_id,
				    mii_status, rp->mii_if.advertising,
				    mdio_read(dev, phy_id, 5));

			/* set IFF_RUNNING */
			if (mii_status & BMSR_LSTATUS)
				netif_carrier_on(dev);
			else
				netif_carrier_off(dev);

		}
	}
	rp->mii_if.phy_id = phy_id;
	if (debug > 1 && avoid_D3)
		netdev_info(dev, "No D3 power state at shutdown\n");

	return 0;

err_out_unmap:
	pci_iounmap(pdev, ioaddr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_free_netdev:
	free_netdev(dev);
err_out:
	return rc;
}

static int alloc_ring(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void *ring;
	dma_addr_t ring_dma;

	ring = pci_alloc_consistent(rp->pdev,
				    RX_RING_SIZE * sizeof(struct rx_desc) +
				    TX_RING_SIZE * sizeof(struct tx_desc),
				    &ring_dma);
	if (!ring) {
		netdev_err(dev, "Could not allocate DMA memory\n");
		return -ENOMEM;
	}
	if (rp->quirks & rqRhineI) {
		rp->tx_bufs = pci_alloc_consistent(rp->pdev,
						   PKT_BUF_SZ * TX_RING_SIZE,
						   &rp->tx_bufs_dma);
		if (rp->tx_bufs == NULL) {
			pci_free_consistent(rp->pdev,
					    RX_RING_SIZE * sizeof(struct rx_desc) +
					    TX_RING_SIZE * sizeof(struct tx_desc),
					    ring, ring_dma);
			return -ENOMEM;
		}
	}

	rp->rx_ring = ring;
	rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
	rp->rx_ring_dma = ring_dma;
	rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);

	return 0;
}
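
/*
 * Editor's note (added): alloc_ring() carves both rings out of a single
 * coherent DMA allocation, RX_RING_SIZE rx_desc entries first, then
 * TX_RING_SIZE tx_desc entries, so one pci_free_consistent() in
 * free_ring() below releases them both.
 */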

static void free_ring(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	pci_free_consistent(rp->pdev,
			    RX_RING_SIZE * sizeof(struct rx_desc) +
			    TX_RING_SIZE * sizeof(struct tx_desc),
			    rp->rx_ring, rp->rx_ring_dma);
	rp->tx_ring = NULL;

	if (rp->tx_bufs)
		pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
				    rp->tx_bufs, rp->tx_bufs_dma);

	rp->tx_bufs = NULL;

}

static void alloc_rbufs(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	dma_addr_t next;
	int i;

	rp->dirty_rx = rp->cur_rx = 0;

	rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
	rp->rx_head_desc = &rp->rx_ring[0];
	next = rp->rx_ring_dma;

	/* Init the ring entries */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
		next += sizeof(struct rx_desc);
		rp->rx_ring[i].next_desc = cpu_to_le32(next);
		rp->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
		rp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;	/* Mark as being used by this device. */

		rp->rx_skbuff_dma[i] =
			pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
				       PCI_DMA_FROMDEVICE);

		rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
		rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
	}
	rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
}

static void free_rbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int i;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rp->rx_ring[i].rx_status = 0;
		rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0);	/* An invalid address. */
		if (rp->rx_skbuff[i]) {
			pci_unmap_single(rp->pdev,
					 rp->rx_skbuff_dma[i],
					 rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(rp->rx_skbuff[i]);
		}
		rp->rx_skbuff[i] = NULL;
	}
}

static void alloc_tbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	dma_addr_t next;
	int i;

	rp->dirty_tx = rp->cur_tx = 0;
	next = rp->tx_ring_dma;
	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_skbuff[i] = NULL;
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		next += sizeof(struct tx_desc);
		rp->tx_ring[i].next_desc = cpu_to_le32(next);
		if (rp->quirks & rqRhineI)
			rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
	}
	rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);

}

static void free_tbufs(struct net_device* dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int i;

	for (i = 0; i < TX_RING_SIZE; i++) {
		rp->tx_ring[i].tx_status = 0;
		rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
		rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0);	/* An invalid address. */
		if (rp->tx_skbuff[i]) {
			if (rp->tx_skbuff_dma[i]) {
				pci_unmap_single(rp->pdev,
						 rp->tx_skbuff_dma[i],
						 rp->tx_skbuff[i]->len,
						 PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(rp->tx_skbuff[i]);
		}
		rp->tx_skbuff[i] = NULL;
		rp->tx_buf[i] = NULL;
	}
}

static void rhine_check_media(struct net_device *dev, unsigned int init_media)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	mii_check_media(&rp->mii_if, debug, init_media);

	if (rp->mii_if.full_duplex)
		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
			 ioaddr + ChipCmd1);
	else
		iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
			 ioaddr + ChipCmd1);
	if (debug > 1)
		netdev_info(dev, "force_media %d, carrier %d\n",
			    rp->mii_if.force_media, netif_carrier_ok(dev));
}

/* Called after status of force_media possibly changed */
static void rhine_set_carrier(struct mii_if_info *mii)
{
	if (mii->force_media) {
		/* autoneg is off: Link is always assumed to be up */
		if (!netif_carrier_ok(mii->dev))
			netif_carrier_on(mii->dev);
	}
	else	/* Let MII library update carrier status */
		rhine_check_media(mii->dev, 0);
	if (debug > 1)
		netdev_info(mii->dev, "force_media %d, carrier %d\n",
			    mii->force_media, netif_carrier_ok(mii->dev));
}

/**
 * rhine_set_cam - set CAM multicast filters
 * @ioaddr: register block of this Rhine
 * @idx: multicast CAM index [0..MCAM_SIZE-1]
 * @addr: multicast address (6 bytes)
 *
 * Load addresses into multicast filters.
 */
static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
{
	int i;

	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
	wmb();

	/* Paranoid -- idx out of range should never happen */
	idx &= (MCAM_SIZE - 1);

	iowrite8((u8) idx, ioaddr + CamAddr);

	for (i = 0; i < 6; i++, addr++)
		iowrite8(*addr, ioaddr + MulticastFilter0 + i);
	udelay(10);
	wmb();

	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
	udelay(10);

	iowrite8(0, ioaddr + CamCon);
}

/**
 * rhine_set_vlan_cam - set CAM VLAN filters
 * @ioaddr: register block of this Rhine
 * @idx: VLAN CAM index [0..VCAM_SIZE-1]
 * @addr: VLAN ID (2 bytes)
 *
 * Load addresses into VLAN filters.
 */
static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
{
	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
	wmb();

	/* Paranoid -- idx out of range should never happen */
	idx &= (VCAM_SIZE - 1);

	iowrite8((u8) idx, ioaddr + CamAddr);

	iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
	udelay(10);
	wmb();

	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
	udelay(10);

	iowrite8(0, ioaddr + CamCon);
}

/**
 * rhine_set_cam_mask - set multicast CAM mask
 * @ioaddr: register block of this Rhine
 * @mask: multicast CAM mask
 *
 * Mask sets multicast filters active/inactive.
 */
static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
{
	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
	wmb();

	/* write mask */
	iowrite32(mask, ioaddr + CamMask);

	/* disable CAMEN */
	iowrite8(0, ioaddr + CamCon);
}

/**
 * rhine_set_vlan_cam_mask - set VLAN CAM mask
 * @ioaddr: register block of this Rhine
 * @mask: VLAN CAM mask
 *
 * Mask sets VLAN filters active/inactive.
 */
static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
{
	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
	wmb();

	/* write mask */
	iowrite32(mask, ioaddr + CamMask);

	/* disable CAMEN */
	iowrite8(0, ioaddr + CamCon);
}

/**
 * rhine_init_cam_filter - initialize CAM filters
 * @dev: network device
 *
 * Initialize (disable) hardware VLAN and multicast support on this
 * Rhine.
 */
static void rhine_init_cam_filter(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	/* Disable all CAMs */
	rhine_set_vlan_cam_mask(ioaddr, 0);
	rhine_set_cam_mask(ioaddr, 0);

	/* disable hardware VLAN support */
	BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
	BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
}

/**
 * rhine_update_vcam - update VLAN CAM filters
 * @dev: network device of this Rhine
 *
 * Update VLAN CAM filters to match configuration change.
 */
static void rhine_update_vcam(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u16 vid;
	u32 vCAMmask = 0;	/* 32 vCAMs (6105M and better) */
	unsigned int i = 0;

	for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
		rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
		vCAMmask |= 1 << i;
		if (++i >= VCAM_SIZE)
			break;
	}
	rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
}

static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct rhine_private *rp = netdev_priv(dev);

	spin_lock_irq(&rp->lock);
	set_bit(vid, rp->active_vlans);
	rhine_update_vcam(dev);
	spin_unlock_irq(&rp->lock);
	return 0;
}

static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct rhine_private *rp = netdev_priv(dev);

	spin_lock_irq(&rp->lock);
	clear_bit(vid, rp->active_vlans);
	rhine_update_vcam(dev);
	spin_unlock_irq(&rp->lock);
	return 0;
}

static void init_registers(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int i;

	for (i = 0; i < 6; i++)
		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Initialize other registers. */
	iowrite16(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration??? */
	/* Configure initial FIFO thresholds. */
	iowrite8(0x20, ioaddr + TxConfig);
	rp->tx_thresh = 0x20;
	rp->rx_thresh = 0x60;		/* Written in rhine_set_rx_mode(). */

	iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
	iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);

	rhine_set_rx_mode(dev);

	if (rp->pdev->revision >= VT6105M)
		rhine_init_cam_filter(dev);

	napi_enable(&rp->napi);

	/* Enable interrupts by setting the interrupt mask. */
	iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty | IntrRxOverflow |
		  IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
		  IntrTxDone | IntrTxError | IntrTxUnderrun |
		  IntrPCIErr | IntrStatsMax | IntrLinkChange,
		  ioaddr + IntrEnable);

	iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
		  ioaddr + ChipCmd);
	rhine_check_media(dev, 1);
}

/* Enable MII link status auto-polling (required for IntrLinkChange) */
static void rhine_enable_linkmon(void __iomem *ioaddr)
{
	iowrite8(0, ioaddr + MIICmd);
	iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
	iowrite8(0x80, ioaddr + MIICmd);

	RHINE_WAIT_FOR((ioread8(ioaddr + MIIRegAddr) & 0x20));

	iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
}

/* Disable MII link status auto-polling (required for MDIO access) */
static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
{
	iowrite8(0, ioaddr + MIICmd);

	if (quirks & rqRhineI) {
		iowrite8(0x01, ioaddr + MIIRegAddr);	// MII_BMSR

		/* Can be called from ISR. Evil. */
		mdelay(1);

		/* 0x80 must be set immediately before turning it off */
		iowrite8(0x80, ioaddr + MIICmd);

		RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x20);

		/* Heh. Now clear 0x80 again. */
		iowrite8(0, ioaddr + MIICmd);
	}
	else
		RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x80);
}

/* Read and write over the MII Management Data I/O (MDIO) interface. */

static int mdio_read(struct net_device *dev, int phy_id, int regnum)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int result;

	rhine_disable_linkmon(ioaddr, rp->quirks);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite8(0x40, ioaddr + MIICmd);		/* Trigger read */
	RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x40));
	result = ioread16(ioaddr + MIIData);

	rhine_enable_linkmon(ioaddr);
	return result;
}

static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	rhine_disable_linkmon(ioaddr, rp->quirks);

	/* rhine_disable_linkmon already cleared MIICmd */
	iowrite8(phy_id, ioaddr + MIIPhyAddr);
	iowrite8(regnum, ioaddr + MIIRegAddr);
	iowrite16(value, ioaddr + MIIData);
	iowrite8(0x20, ioaddr + MIICmd);		/* Trigger write */
	RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x20));

	rhine_enable_linkmon(ioaddr);
}

static int rhine_open(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int rc;

	rc = request_irq(rp->pdev->irq, rhine_interrupt, IRQF_SHARED, dev->name,
			dev);
	if (rc)
		return rc;

	if (debug > 1)
		netdev_dbg(dev, "%s() irq %d\n", __func__, rp->pdev->irq);

	rc = alloc_ring(dev);
	if (rc) {
		free_irq(rp->pdev->irq, dev);
		return rc;
	}
	alloc_rbufs(dev);
	alloc_tbufs(dev);
	rhine_chip_reset(dev);
	init_registers(dev);
	if (debug > 2)
		netdev_dbg(dev, "%s() Done - status %04x MII status: %04x\n",
			   __func__, ioread16(ioaddr + ChipCmd),
			   mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	netif_start_queue(dev);

	return 0;
}

static void rhine_reset_task(struct work_struct *work)
{
	struct rhine_private *rp = container_of(work, struct rhine_private,
						reset_task);
	struct net_device *dev = rp->dev;

	/* protect against concurrent rx interrupts */
	disable_irq(rp->pdev->irq);

	napi_disable(&rp->napi);

	spin_lock_bh(&rp->lock);

	/* clear all descriptors */
	free_tbufs(dev);
	free_rbufs(dev);
	alloc_tbufs(dev);
	alloc_rbufs(dev);

	/* Reinitialize the hardware. */
	rhine_chip_reset(dev);
	init_registers(dev);

	spin_unlock_bh(&rp->lock);
	enable_irq(rp->pdev->irq);

	dev->trans_start = jiffies; /* prevent tx timeout */
	dev->stats.tx_errors++;
	netif_wake_queue(dev);
}

static void rhine_tx_timeout(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n",
		    ioread16(ioaddr + IntrStatus),
		    mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));

	schedule_work(&rp->reset_task);
}

static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
				  struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	unsigned entry;
	unsigned long flags;

	/* Caution: the write order is important here, set the field
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = rp->cur_tx % TX_RING_SIZE;

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	rp->tx_skbuff[entry] = skb;

	if ((rp->quirks & rqRhineI) &&
	    (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
		/* Must use alignment buffer. */
		if (skb->len > PKT_BUF_SZ) {
			/* packet too long, drop it */
			dev_kfree_skb(skb);
			rp->tx_skbuff[entry] = NULL;
			dev->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}

		/* Padding is not copied and so must be redone. */
		skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
		if (skb->len < ETH_ZLEN)
			memset(rp->tx_buf[entry] + skb->len, 0,
			       ETH_ZLEN - skb->len);
		rp->tx_skbuff_dma[entry] = 0;
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
						      (rp->tx_buf[entry] -
						       rp->tx_bufs));
	} else {
		rp->tx_skbuff_dma[entry] =
			pci_map_single(rp->pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
	}

	rp->tx_ring[entry].desc_length =
		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));

	if (unlikely(vlan_tx_tag_present(skb))) {
		rp->tx_ring[entry].tx_status = cpu_to_le32((vlan_tx_tag_get(skb)) << 16);
		/* request tagging */
		rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
	}
	else
		rp->tx_ring[entry].tx_status = 0;

	/* lock eth irq */
	spin_lock_irqsave(&rp->lock, flags);
	wmb();
	rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
	wmb();

	rp->cur_tx++;

	/* Non-x86 Todo: explicitly flush cache lines here. */

	if (vlan_tx_tag_present(skb))
		/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
		BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);

	/* Wake the potentially-idle transmit channel */
	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
		 ioaddr + ChipCmd1);
	IOSYNC;

	if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&rp->lock, flags);

	if (debug > 4) {
		netdev_dbg(dev, "Transmit frame #%d queued in slot %d\n",
			   rp->cur_tx-1, entry);
	}
	return NETDEV_TX_OK;
}
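
/*
 * Editor's note (added): the wmb() pair in rhine_start_tx() appears
 * deliberate; the first orders all descriptor field writes before the
 * DescOwn handover, and the second orders DescOwn before the later
 * Cmd1TxDemand doorbell write that may prompt the chip to fetch the
 * descriptor.
 */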

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 intr_status;
	int boguscnt = max_interrupt_work;
	int handled = 0;

	while ((intr_status = get_intr_status(dev))) {
		handled = 1;

		/* Acknowledge all of the current interrupt sources ASAP. */
		if (intr_status & IntrTxDescRace)
			iowrite8(0x08, ioaddr + IntrStatus2);
		iowrite16(intr_status & 0xffff, ioaddr + IntrStatus);
		IOSYNC;

		if (debug > 4)
			netdev_dbg(dev, "Interrupt, status %08x\n",
				   intr_status);

		if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
				   IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) {
			iowrite16(IntrTxAborted |
				  IntrTxDone | IntrTxError | IntrTxUnderrun |
				  IntrPCIErr | IntrStatsMax | IntrLinkChange,
				  ioaddr + IntrEnable);

			napi_schedule(&rp->napi);
		}

		if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
			if (intr_status & IntrTxErrSummary) {
				/* Avoid scavenging before Tx engine turned off */
				RHINE_WAIT_FOR(!(ioread8(ioaddr+ChipCmd) & CmdTxOn));
				if (debug > 2 &&
				    ioread8(ioaddr+ChipCmd) & CmdTxOn)
					netdev_warn(dev,
						    "%s: Tx engine still on\n",
						    __func__);
			}
			rhine_tx(dev);
		}

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (IntrPCIErr | IntrLinkChange |
				   IntrStatsMax | IntrTxError | IntrTxAborted |
				   IntrTxUnderrun | IntrTxDescRace))
			rhine_error(dev, intr_status);

		if (--boguscnt < 0) {
			netdev_warn(dev, "Too much work at interrupt, status=%#08x\n",
				    intr_status);
			break;
		}
	}

	if (debug > 3)
		netdev_dbg(dev, "exiting interrupt, status=%08x\n",
			   ioread16(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}
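
/*
 * Editor's note (added): on any Rx-type event the handler above narrows
 * IntrEnable to Tx/error sources only and hands Rx processing to NAPI via
 * napi_schedule(); rhine_napipoll() restores the full interrupt mask once
 * it completes with work_done below its budget.
 */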

/* This routine is logically part of the interrupt handler, but isolated
   for clarity. */
static void rhine_tx(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;

	spin_lock(&rp->lock);

	/* find and cleanup dirty tx descriptors */
	while (rp->dirty_tx != rp->cur_tx) {
		txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
		if (debug > 6)
			netdev_dbg(dev, "Tx scavenge %d status %08x\n",
				   entry, txstatus);
		if (txstatus & DescOwn)
			break;
		if (txstatus & 0x8000) {
			if (debug > 1)
				netdev_dbg(dev, "Transmit error, Tx status %08x\n",
					   txstatus);
			dev->stats.tx_errors++;
			if (txstatus & 0x0400)
				dev->stats.tx_carrier_errors++;
			if (txstatus & 0x0200)
				dev->stats.tx_window_errors++;
			if (txstatus & 0x0100)
				dev->stats.tx_aborted_errors++;
			if (txstatus & 0x0080)
				dev->stats.tx_heartbeat_errors++;
			if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
			    (txstatus & 0x0800) || (txstatus & 0x1000)) {
				dev->stats.tx_fifo_errors++;
				rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
				break; /* Keep the skb - we try again */
			}
			/* Transmitter restarted in 'abnormal' handler. */
		} else {
			if (rp->quirks & rqRhineI)
				dev->stats.collisions += (txstatus >> 3) & 0x0F;
			else
				dev->stats.collisions += txstatus & 0x0F;
			if (debug > 6)
				netdev_dbg(dev, "collisions: %1.1x:%1.1x\n",
					   (txstatus >> 3) & 0xF,
					   txstatus & 0xF);
			dev->stats.tx_bytes += rp->tx_skbuff[entry]->len;
			dev->stats.tx_packets++;
		}
		/* Free the original skb. */
		if (rp->tx_skbuff_dma[entry]) {
			pci_unmap_single(rp->pdev,
					 rp->tx_skbuff_dma[entry],
					 rp->tx_skbuff[entry]->len,
					 PCI_DMA_TODEVICE);
		}
		dev_kfree_skb_irq(rp->tx_skbuff[entry]);
		rp->tx_skbuff[entry] = NULL;
		entry = (++rp->dirty_tx) % TX_RING_SIZE;
	}
	if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
		netif_wake_queue(dev);

	spin_unlock(&rp->lock);
}

/**
 * rhine_get_vlan_tci - extract TCI from Rx data buffer
 * @skb: pointer to sk_buff
 * @data_size: used data area of the buffer including CRC
 *
 * If hardware VLAN tag extraction is enabled and the chip indicates a 802.1Q
 * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
 * aligned following the CRC.
 */
static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
{
	u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
	return be16_to_cpup((__be16 *)trailer);
}
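
/*
 * Editor's worked example (added): for a tagged frame with data_size = 67
 * (payload plus CRC), ((67 + 3) & ~3) rounds up to 68, the first 4-byte
 * aligned offset past the CRC; adding 2 skips the TPID, so trailer points
 * at the 2-byte TCI.
 */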

/* Process up to limit frames from receive ring */
static int rhine_rx(struct net_device *dev, int limit)
{
	struct rhine_private *rp = netdev_priv(dev);
	int count;
	int entry = rp->cur_rx % RX_RING_SIZE;

	if (debug > 4) {
		netdev_dbg(dev, "%s(), entry %d status %08x\n",
			   __func__, entry,
			   le32_to_cpu(rp->rx_head_desc->rx_status));
	}

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	for (count = 0; count < limit; ++count) {
		struct rx_desc *desc = rp->rx_head_desc;
		u32 desc_status = le32_to_cpu(desc->rx_status);
		u32 desc_length = le32_to_cpu(desc->desc_length);
		int data_size = desc_status >> 16;

		if (desc_status & DescOwn)
			break;

		if (debug > 4)
			netdev_dbg(dev, "%s() status is %08x\n",
				   __func__, desc_status);

		if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
			if ((desc_status & RxWholePkt) != RxWholePkt) {
				netdev_warn(dev,
					    "Oversized Ethernet frame spanned multiple buffers, "
					    "entry %#x length %d status %08x!\n",
					    entry, data_size,
					    desc_status);
				netdev_warn(dev,
					    "Oversized Ethernet frame %p vs %p\n",
					    rp->rx_head_desc,
					    &rp->rx_ring[entry]);
				dev->stats.rx_length_errors++;
			} else if (desc_status & RxErr) {
				/* There was an error. */
				if (debug > 2)
					netdev_dbg(dev, "%s() Rx error was %08x\n",
						   __func__, desc_status);
				dev->stats.rx_errors++;
				if (desc_status & 0x0030)
					dev->stats.rx_length_errors++;
				if (desc_status & 0x0048)
					dev->stats.rx_fifo_errors++;
				if (desc_status & 0x0004)
					dev->stats.rx_frame_errors++;
				if (desc_status & 0x0002) {
					/* this can also be updated outside the interrupt handler */
					spin_lock(&rp->lock);
					dev->stats.rx_crc_errors++;
					spin_unlock(&rp->lock);
				}
			}
		} else {
			struct sk_buff *skb = NULL;
			/* Length should omit the CRC */
			int pkt_len = data_size - 4;
			u16 vlan_tci = 0;

			/* Check if the packet is long enough to accept without
			   copying to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak)
				skb = netdev_alloc_skb_ip_align(dev, pkt_len);
			if (skb) {
				pci_dma_sync_single_for_cpu(rp->pdev,
							    rp->rx_skbuff_dma[entry],
							    rp->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);

				skb_copy_to_linear_data(skb,
							rp->rx_skbuff[entry]->data,
							pkt_len);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(rp->pdev,
							       rp->rx_skbuff_dma[entry],
							       rp->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			} else {
				skb = rp->rx_skbuff[entry];
				if (skb == NULL) {
					netdev_err(dev, "Inconsistent Rx descriptor chain\n");
					break;
				}
				rp->rx_skbuff[entry] = NULL;
				skb_put(skb, pkt_len);
				pci_unmap_single(rp->pdev,
						 rp->rx_skbuff_dma[entry],
						 rp->rx_buf_sz,
						 PCI_DMA_FROMDEVICE);
			}

			if (unlikely(desc_length & DescTag))
				vlan_tci = rhine_get_vlan_tci(skb, data_size);

			skb->protocol = eth_type_trans(skb, dev);

			if (unlikely(desc_length & DescTag))
				__vlan_hwaccel_put_tag(skb, vlan_tci);
			netif_receive_skb(skb);
			dev->stats.rx_bytes += pkt_len;
			dev->stats.rx_packets++;
		}
		entry = (++rp->cur_rx) % RX_RING_SIZE;
		rp->rx_head_desc = &rp->rx_ring[entry];
	}
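
	/*
	 * rx_copybreak rationale: frames shorter than rx_copybreak were
	 * copied above into a fresh, IP-aligned skb so the original DMA
	 * buffer stays in the ring; larger frames handed their mapped
	 * buffer to the stack, and the refill loop below replaces it.
	 */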

	/* Refill the Rx ring buffers. */
	for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
		struct sk_buff *skb;
		entry = rp->dirty_rx % RX_RING_SIZE;
		if (rp->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
			rp->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			skb->dev = dev;	/* Mark as being used by this device. */
			rp->rx_skbuff_dma[entry] =
				pci_map_single(rp->pdev, skb->data,
					       rp->rx_buf_sz,
					       PCI_DMA_FROMDEVICE);
			rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
		}
		rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
	}

	return count;
}
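
/*
 * rhine_rx() is written for NAPI-style budgeted polling: @limit caps
 * the work done per call, and the returned count of processed frames
 * lets the poll loop (not shown in this excerpt) decide whether to
 * re-enable interrupts or keep polling.
 */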

/*
 * Clears the "tally counters" for CRC errors and missed frames(?).
 * It has been reported that some chips need a write of 0 to clear
 * these, for others the counters are set to 1 when written to and
 * instead cleared when read. So we clear them both ways ...
 */
static inline void clear_tally_counters(void __iomem *ioaddr)
{
	iowrite32(0, ioaddr + RxMissed);
	ioread16(ioaddr + RxCRCErrs);
	ioread16(ioaddr + RxMissed);
}
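
/*
 * Both callers below (rhine_error() and rhine_get_stats()) accumulate
 * the counters into dev->stats immediately before clearing, so the
 * clear-on-read behaviour of some chips does not discard counts that
 * were never accumulated.
 */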

static void rhine_restart_tx(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	int entry = rp->dirty_tx % TX_RING_SIZE;
	u32 intr_status;

	/*
	 * If new errors occurred, we need to sort them out before doing Tx.
	 * In that case the ISR will be back here RSN anyway.
	 */
	intr_status = get_intr_status(dev);

	if ((intr_status & IntrTxErrSummary) == 0) {

		/* We know better than the chip where it should continue. */
		iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
			  ioaddr + TxRingPtr);

		iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
			 ioaddr + ChipCmd);

		if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
			/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
			BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);

		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
			 ioaddr + ChipCmd1);
		IOSYNC;
	} else {
		/* This should never happen */
		if (debug > 1)
			netdev_warn(dev, "%s() Another error occurred %08x\n",
				    __func__, intr_status);
	}
}
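
/*
 * Illustrative: with TX_RING_SIZE = 16 and dirty_tx = 21, the restart
 * pointer written above is tx_ring_dma + (21 % 16) * sizeof(struct tx_desc),
 * i.e. descriptor 5, the oldest not-yet-reclaimed entry.
 */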

static void rhine_error(struct net_device *dev, int intr_status)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	spin_lock(&rp->lock);

	if (intr_status & IntrLinkChange)
		rhine_check_media(dev, 0);
	if (intr_status & IntrStatsMax) {
		dev->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
		dev->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
		clear_tally_counters(ioaddr);
	}
	if (intr_status & IntrTxAborted) {
		if (debug > 1)
			netdev_info(dev, "Abort %08x, frame dropped\n",
				    intr_status);
	}
	if (intr_status & IntrTxUnderrun) {
		if (rp->tx_thresh < 0xE0)
			BYTE_REG_BITS_SET((rp->tx_thresh += 0x20), 0x80, ioaddr + TxConfig);
		if (debug > 1)
			netdev_info(dev, "Transmitter underrun, Tx threshold now %02x\n",
				    rp->tx_thresh);
	}
	if (intr_status & IntrTxDescRace) {
		if (debug > 2)
			netdev_info(dev, "Tx descriptor write-back race\n");
	}
	if ((intr_status & IntrTxError) &&
	    (intr_status & (IntrTxAborted |
	     IntrTxUnderrun | IntrTxDescRace)) == 0) {
		if (rp->tx_thresh < 0xE0) {
			BYTE_REG_BITS_SET((rp->tx_thresh += 0x20), 0x80, ioaddr + TxConfig);
		}
		if (debug > 1)
			netdev_info(dev, "Unspecified error. Tx threshold now %02x\n",
				    rp->tx_thresh);
	}
	if (intr_status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace |
			   IntrTxError))
		rhine_restart_tx(dev);

	if (intr_status & ~(IntrLinkChange | IntrStatsMax | IntrTxUnderrun |
			    IntrTxError | IntrTxAborted | IntrNormalSummary |
			    IntrTxDescRace)) {
		if (debug > 1)
			netdev_err(dev, "Something Wicked happened! %08x\n",
				   intr_status);
	}

	spin_unlock(&rp->lock);
}
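
/*
 * The tx_thresh adjustments above back off in 0x20 steps up to a cap of
 * 0xE0, making the chip buffer more of a frame in its FIFO before it
 * starts transmitting -- the usual remedy for Tx underruns.
 */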

static struct net_device_stats *rhine_get_stats(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	unsigned long flags;

	spin_lock_irqsave(&rp->lock, flags);
	dev->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
	dev->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
	clear_tally_counters(ioaddr);
	spin_unlock_irqrestore(&rp->lock, flags);

	return &dev->stats;
}

static void rhine_set_rx_mode(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;
	u32 mc_filter[2];	/* Multicast hash filter */
	u8 rx_mode = 0x0C;	/* Note: 0x02=accept runt, 0x01=accept errs */
	struct netdev_hw_addr *ha;

	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
		rx_mode = 0x1C;
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
	} else if (rp->pdev->revision >= VT6105M) {
		int i = 0;
		u32 mCAMmask = 0;	/* 32 mCAMs (6105M and better) */
		netdev_for_each_mc_addr(ha, dev) {
			if (i == MCAM_SIZE)
				break;
			rhine_set_cam(ioaddr, i, ha->addr);
			mCAMmask |= 1 << i;
			i++;
		}
		rhine_set_cam_mask(ioaddr, mCAMmask);
	} else {
		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;

			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
	}
	/* enable/disable VLAN receive filtering */
	if (rp->pdev->revision >= VT6105M) {
		if (dev->flags & IFF_PROMISC)
			BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
		else
			BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
	}
	BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
}
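
/*
 * Hash details for the final branch above: ether_crc() returns a 32-bit
 * CRC, its top six bits (>> 26) select one of the 64 filter bits,
 * bit_nr >> 5 picks MulticastFilter0 or MulticastFilter1, and
 * bit_nr & 31 the bit within that 32-bit register.
 */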

static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct rhine_private *rp = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
}

static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	spin_lock_irq(&rp->lock);
	rc = mii_ethtool_gset(&rp->mii_if, cmd);
	spin_unlock_irq(&rp->lock);

	return rc;
}

static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	spin_lock_irq(&rp->lock);
	rc = mii_ethtool_sset(&rp->mii_if, cmd);
	spin_unlock_irq(&rp->lock);
	rhine_set_carrier(&rp->mii_if);

	return rc;
}

static int netdev_nway_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return mii_nway_restart(&rp->mii_if);
}

static u32 netdev_get_link(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return mii_link_ok(&rp->mii_if);
}

static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}

static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}

static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rhine_private *rp = netdev_priv(dev);

	if (!(rp->quirks & rqWOL))
		return;

	spin_lock_irq(&rp->lock);
	wol->supported = WAKE_PHY | WAKE_MAGIC |
			 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */
	wol->wolopts = rp->wolopts;
	spin_unlock_irq(&rp->lock);
}

static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rhine_private *rp = netdev_priv(dev);
	u32 support = WAKE_PHY | WAKE_MAGIC |
		      WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;	/* Untested */

	if (!(rp->quirks & rqWOL))
		return -EINVAL;

	if (wol->wolopts & ~support)
		return -EINVAL;

	spin_lock_irq(&rp->lock);
	rp->wolopts = wol->wolopts;
	spin_unlock_irq(&rp->lock);

	return 0;
}
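
/*
 * rhine_set_wol() only records the requested modes; the corresponding
 * WOLcrSet/WOLcgSet bits are programmed in rhine_shutdown() when the
 * device actually goes down.
 */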

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.set_settings		= netdev_set_settings,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.get_wol		= rhine_get_wol,
	.set_wol		= rhine_set_wol,
};
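
/*
 * These ops service ethtool(8) from userspace; for example (commands
 * illustrative), "ethtool eth0" exercises get_settings/get_link, and
 * "ethtool -s eth0 wol g" reaches rhine_set_wol() with WAKE_MAGIC.
 */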

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irq(&rp->lock);
	rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
	spin_unlock_irq(&rp->lock);
	rhine_set_carrier(&rp->mii_if);

	return rc;
}

static int rhine_close(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	napi_disable(&rp->napi);
	cancel_work_sync(&rp->reset_task);
	netif_stop_queue(dev);

	spin_lock_irq(&rp->lock);

	if (debug > 1)
		netdev_dbg(dev, "Shutting down ethercard, status was %04x\n",
			   ioread16(ioaddr + ChipCmd));

	/* Switch to loopback mode to avoid hardware races. */
	iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite16(0x0000, ioaddr + IntrEnable);

	/* Stop the chip's Tx and Rx processes. */
	iowrite16(CmdStop, ioaddr + ChipCmd);

	spin_unlock_irq(&rp->lock);

	free_irq(rp->pdev->irq, dev);
	free_rbufs(dev);
	free_tbufs(dev);
	free_ring(dev);

	return 0;
}
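
/*
 * Teardown order above matters: NAPI and the reset work are quiesced
 * before interrupts are masked and the rings freed, so no softirq or
 * worker can touch a buffer that free_rbufs()/free_tbufs() reclaims.
 */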


static void __devexit rhine_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);

	unregister_netdev(dev);

	pci_iounmap(pdev, rp->base);
	pci_release_regions(pdev);

	free_netdev(dev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static void rhine_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	if (!(rp->quirks & rqWOL))
		return; /* Nothing to do for non-WOL adapters */

	rhine_power_init(dev);

	/* Make sure we use pattern 0, 1 and not 4, 5 */
	if (rp->quirks & rq6patterns)
		iowrite8(0x04, ioaddr + WOLcgClr);

	if (rp->wolopts & WAKE_MAGIC) {
		iowrite8(WOLmagic, ioaddr + WOLcrSet);
		/*
		 * Turn EEPROM-controlled wake-up back on -- some hardware may
		 * not cooperate otherwise.
		 */
		iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
	}

	if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
		iowrite8(WOLbmcast, ioaddr + WOLcgSet);

	if (rp->wolopts & WAKE_PHY)
		iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);

	if (rp->wolopts & WAKE_UCAST)
		iowrite8(WOLucast, ioaddr + WOLcrSet);

	if (rp->wolopts) {
		/* Enable legacy WOL (for old motherboards) */
		iowrite8(0x01, ioaddr + PwcfgSet);
		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
	}

	/* Hit power state D3 (sleep) */
	if (!avoid_D3)
		iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);

	/* TODO: Check use of pci_enable_wake() */
}
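
/*
 * The avoid_D3 escape hatch exists because some BIOSes (matched via
 * rhine_dmi_table below) cannot recover the chip from D3, which breaks
 * PXE boot; in that case the final StickyHW write is skipped.
 */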

#ifdef CONFIG_PM
static int rhine_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	unsigned long flags;

	if (!netif_running(dev))
		return 0;

	napi_disable(&rp->napi);

	netif_device_detach(dev);
	pci_save_state(pdev);

	spin_lock_irqsave(&rp->lock, flags);
	rhine_shutdown(pdev);
	spin_unlock_irqrestore(&rp->lock, flags);

	free_irq(dev->irq, dev);
	return 0;
}

static int rhine_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	unsigned long flags;
	int ret;

	if (!netif_running(dev))
		return 0;

	if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev))
		netdev_err(dev, "request_irq failed\n");

	ret = pci_set_power_state(pdev, PCI_D0);
	if (debug > 1)
		netdev_info(dev, "Entering power state D0 %s (%d)\n",
			    ret ? "failed" : "succeeded", ret);

	pci_restore_state(pdev);

	spin_lock_irqsave(&rp->lock, flags);
#ifdef USE_MMIO
	enable_mmio(rp->pioaddr, rp->quirks);
#endif
	rhine_power_init(dev);
	free_tbufs(dev);
	free_rbufs(dev);
	alloc_tbufs(dev);
	alloc_rbufs(dev);
	init_registers(dev);
	spin_unlock_irqrestore(&rp->lock, flags);

	netif_device_attach(dev);

	return 0;
}
#endif /* CONFIG_PM */

static struct pci_driver rhine_driver = {
	.name		= DRV_NAME,
	.id_table	= rhine_pci_tbl,
	.probe		= rhine_init_one,
	.remove		= __devexit_p(rhine_remove_one),
#ifdef CONFIG_PM
	.suspend	= rhine_suspend,
	.resume		= rhine_resume,
#endif /* CONFIG_PM */
	.shutdown	= rhine_shutdown,
};
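
/*
 * The .shutdown hook also runs at system reboot/poweroff, so the WOL
 * programming in rhine_shutdown() takes effect even outside the
 * CONFIG_PM suspend path.
 */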

static struct dmi_system_id __initdata rhine_dmi_table[] = {
	{
		.ident = "EPIA-M",
		.matches = {
			DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
		},
	},
	{
		.ident = "KV7",
		.matches = {
			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
		},
	},
	{ NULL }
};

static int __init rhine_init(void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	pr_info("%s\n", version);
#endif
	if (dmi_check_system(rhine_dmi_table)) {
		/* these BIOSes fail at PXE boot if chip is in D3 */
		avoid_D3 = true;
		pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
	} else if (avoid_D3)
		pr_info("avoid_D3 set\n");

	return pci_register_driver(&rhine_driver);
}


static void __exit rhine_cleanup(void)
{
	pci_unregister_driver(&rhine_driver);
}


module_init(rhine_init);
module_exit(rhine_cleanup);