/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 * engineered documentation written by Carl-Daniel Hailfinger
 * and Andrew de Quincey.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4,5 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *		IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004,5,6 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Changelog:
 *	0.01: 05 Oct 2003: First release that compiles without warnings.
 *	0.02: 05 Oct 2003: Fix bug for nv_drain_tx: do not try to free NULL skbs.
 *			   Check all PCI BARs for the register window.
 *			   udelay added to mii_rw.
 *	0.03: 06 Oct 2003: Initialize dev->irq.
 *	0.04: 07 Oct 2003: Initialize np->lock, reduce handled irqs, add printks.
 *	0.05: 09 Oct 2003: printk removed again, irq status print tx_timeout.
 *	0.06: 10 Oct 2003: MAC Address read updated, pff flag generation updated,
 *			   irq mask updated
 *	0.07: 14 Oct 2003: Further irq mask updates.
 *	0.08: 20 Oct 2003: rx_desc.Length initialization added, nv_alloc_rx refill
 *			   added into irq handler, NULL check for drain_ring.
 *	0.09: 20 Oct 2003: Basic link speed irq implementation. Only handle the
 *			   requested interrupt sources.
 *	0.10: 20 Oct 2003: First cleanup for release.
 *	0.11: 21 Oct 2003: hexdump for tx added, rx buffer sizes increased.
 *			   MAC Address init fix, set_multicast cleanup.
 *	0.12: 23 Oct 2003: Cleanups for release.
 *	0.13: 25 Oct 2003: Limit for concurrent tx packets increased to 10.
 *			   Set link speed correctly. start rx before starting
 *			   tx (nv_start_rx sets the link speed).
 *	0.14: 25 Oct 2003: Nic dependent irq mask.
 *	0.15: 08 Nov 2003: fix smp deadlock with set_multicast_list during
 *			   open.
 *	0.16: 15 Nov 2003: include file cleanup for ppc64, rx buffer size
 *			   increased to 1628 bytes.
 *	0.17: 16 Nov 2003: undo rx buffer size increase. Subtract 1 from
 *			   the tx length.
 *	0.18: 17 Nov 2003: fix oops due to late initialization of dev_stats
 *	0.19: 29 Nov 2003: Handle RxNoBuf, detect & handle invalid mac
 *			   addresses, really stop rx if already running
 *			   in nv_start_rx, clean up a bit.
 *	0.20: 07 Dec 2003: alloc fixes
 *	0.21: 12 Jan 2004: additional alloc fix, nic polling fix.
 *	0.22: 19 Jan 2004: reprogram timer to a sane rate, avoid lockup
 *			   on close.
 *	0.23: 26 Jan 2004: various small cleanups
 *	0.24: 27 Feb 2004: make driver even less anonymous in backtraces
 *	0.25: 09 Mar 2004: wol support
 *	0.26: 03 Jun 2004: netdriver specific annotation, sparse-related fixes
 *	0.27: 19 Jun 2004: Gigabit support, new descriptor rings,
 *			   added CK804/MCP04 device IDs, code fixes
 *			   for registers, link status and other minor fixes.
 *	0.28: 21 Jun 2004: Big cleanup, making driver mostly endian safe
 *	0.29: 31 Aug 2004: Add backup timer for link change notification.
 *	0.30: 25 Sep 2004: rx checksum support for nf 250 Gb. Add rx reset
 *			   into nv_close, otherwise reenabling for wol can
 *			   cause DMA to kfree'd memory.
 *	0.31: 14 Nov 2004: ethtool support for getting/setting link
 *			   capabilities.
 *	0.32: 16 Apr 2005: RX_ERROR4 handling added.
 *	0.33: 16 May 2005: Support for MCP51 added.
 *	0.34: 18 Jun 2005: Add DEV_NEED_LINKTIMER to all nForce nics.
 *	0.35: 26 Jun 2005: Support for MCP55 added.
 *	0.36: 28 Jun 2005: Add jumbo frame support.
 *	0.37: 10 Jul 2005: Additional ethtool support, cleanup of pci id list
 *	0.38: 16 Jul 2005: tx irq rewrite: Use global flags instead of
 *			   per-packet flags.
 *	0.39: 18 Jul 2005: Add 64bit descriptor support.
 *	0.40: 19 Jul 2005: Add support for mac address change.
 *	0.41: 30 Jul 2005: Write back original MAC in nv_close instead
 *			   of nv_remove
 *	0.42: 06 Aug 2005: Fix lack of link speed initialization
 *			   in the second (and later) nv_open call
 *	0.43: 10 Aug 2005: Add support for tx checksum.
 *	0.44: 20 Aug 2005: Add support for scatter gather and segmentation.
 *	0.45: 18 Sep 2005: Remove nv_stop/start_rx from every link check
 *	0.46: 20 Oct 2005: Add irq optimization modes.
 *	0.47: 26 Oct 2005: Add phyaddr 0 in phy scan.
 *	0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single
 *	0.49: 10 Dec 2005: Fix tso for large buffers.
 *	0.50: 20 Jan 2006: Add 8021pq tagging support.
 *	0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
 *	0.52: 20 Jan 2006: Add MSI/MSIX support.
 *	0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
 *	0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
 *	0.55: 22 Mar 2006: Add flow control (pause frame).
 *	0.56: 22 Mar 2006: Additional ethtool config and moduleparam support.
 *	0.57: 14 May 2006: Mac address set in probe/remove and order corrections.
 *	0.58: 30 Oct 2006: Added support for sideband management unit.
 *	0.59: 30 Oct 2006: Added support for recoverable error.
 *	0.60: 20 Jan 2007: Code optimizations for rings, rx & tx data paths, and stats.
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if an rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */
#ifdef CONFIG_FORCEDETH_NAPI
#define DRIVERNAPI "-NAPI"
#else
#define DRIVERNAPI
#endif
#define FORCEDETH_VERSION	"0.60"
#define DRV_NAME		"forcedeth"

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>

#if 0
#define dprintk			printk
#else
#define dprintk(x...)		do { } while (0)
#endif


/*
 * Hardware access:
 */

#define DEV_NEED_TIMERIRQ	0x0001	/* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER	0x0002	/* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC	0x0004	/* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA	0x0008	/* device supports 64bit dma */
#define DEV_HAS_CHECKSUM	0x0010	/* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN		0x0020	/* device supports vlan tagging and striping */
#define DEV_HAS_MSI		0x0040	/* device supports MSI */
#define DEV_HAS_MSI_X		0x0080	/* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL	0x0100	/* device supports power savings */
#define DEV_HAS_PAUSEFRAME_TX	0x0200	/* device supports tx pause frames */
#define DEV_HAS_STATISTICS_V1	0x0400	/* device supports hw statistics version 1 */
#define DEV_HAS_STATISTICS_V2	0x0800	/* device supports hw statistics version 2 */
#define DEV_HAS_TEST_EXTENDED	0x1000	/* device supports extended diagnostic test */
#define DEV_HAS_MGMT_UNIT	0x2000	/* device supports management unit */
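
/*
 * Illustrative sketch (not part of the driver logic here): these per-device
 * flags travel in the PCI device table's driver_data and are tested at
 * runtime, for example:
 *
 *	if (np->driver_data & DEV_HAS_MSI)
 *		np->msi_flags |= NV_MSI_CAPABLE;
 *
 * The exact probe-time wiring is an assumption in this sketch; the
 * flag-test pattern itself matches how driver_data is used in
 * nv_get_stats() and nv_get_hw_stats() below.
 */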

enum {
	NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT	0x040
#define NVREG_IRQSTAT_MASK	0x81ff
	NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR		0x0001
#define NVREG_IRQ_RX			0x0002
#define NVREG_IRQ_RX_NOBUF		0x0004
#define NVREG_IRQ_TX_ERR		0x0008
#define NVREG_IRQ_TX_OK			0x0010
#define NVREG_IRQ_TIMER			0x0020
#define NVREG_IRQ_LINK			0x0040
#define NVREG_IRQ_RX_FORCED		0x0080
#define NVREG_IRQ_TX_FORCED		0x0100
#define NVREG_IRQ_RECOVER_ERROR		0x8000
#define NVREG_IRQMASK_THROUGHPUT	0x00df
#define NVREG_IRQMASK_CPU		0x0060
#define NVREG_IRQ_TX_ALL		(NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL		(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER			(NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)

#define NVREG_IRQ_UNKNOWN	(~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
					NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \
					NVREG_IRQ_TX_FORCED|NVREG_IRQ_RECOVER_ERROR))

	NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL	3

/*
 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 */
	NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT	970 /* backup tx cleanup if loop max reached */
#define NVREG_POLL_DEFAULT_CPU	13
	NvRegMSIMap0 = 0x020,
	NvRegMSIMap1 = 0x024,
	NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED 0x01
	NvRegMisc1 = 0x080,
#define NVREG_MISC1_PAUSE_TX	0x01
#define NVREG_MISC1_HD		0x02
#define NVREG_MISC1_FORCE	0x3b0f3c

	NvRegMacReset = 0x3c,
#define NVREG_MAC_RESET_ASSERT	0x0F3
	NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START	0x01
#define NVREG_XMITCTL_MGMT_ST	0x40000000
#define NVREG_XMITCTL_SYNC_MASK		0x000f0000
#define NVREG_XMITCTL_SYNC_NOT_READY	0x0
#define NVREG_XMITCTL_SYNC_PHY_INIT	0x00040000
#define NVREG_XMITCTL_MGMT_SEMA_MASK	0x00000f00
#define NVREG_XMITCTL_MGMT_SEMA_FREE	0x0
#define NVREG_XMITCTL_HOST_SEMA_MASK	0x0000f000
#define NVREG_XMITCTL_HOST_SEMA_ACQ	0x0000f000
#define NVREG_XMITCTL_HOST_LOADED	0x00004000
#define NVREG_XMITCTL_TX_PATH_EN	0x01000000
	NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY	0x01

	NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_PAUSE_RX	0x08
#define NVREG_PFF_ALWAYS	0x7F0000
#define NVREG_PFF_PROMISC	0x80
#define NVREG_PFF_MYADDR	0x20
#define NVREG_PFF_LOOPBACK	0x10

	NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY	0x601
#define NVREG_OFFLOAD_NORMAL	RX_NIC_BUFSIZE
	NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START	0x01
#define NVREG_RCVCTL_RX_PATH_EN	0x01000000
	NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY	0x01

	NvRegRandomSeed = 0x9c,
#define NVREG_RNDSEED_MASK	0x00ff
#define NVREG_RNDSEED_FORCE	0x7f00
#define NVREG_RNDSEED_FORCE2	0x2d00
#define NVREG_RNDSEED_FORCE3	0x7400

	NvRegTxDeferral = 0xA0,
#define NVREG_TX_DEFERRAL_DEFAULT	0x15050f
#define NVREG_TX_DEFERRAL_RGMII_10_100	0x16070f
#define NVREG_TX_DEFERRAL_RGMII_1000	0x14050f
	NvRegRxDeferral = 0xA4,
#define NVREG_RX_DEFERRAL_DEFAULT	0x16
	NvRegMacAddrA = 0xA8,
	NvRegMacAddrB = 0xAC,
	NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE	0x01
	NvRegMulticastAddrB = 0xB4,
	NvRegMulticastMaskA = 0xB8,
	NvRegMulticastMaskB = 0xBC,

	NvRegPhyInterface = 0xC0,
#define PHY_RGMII		0x10000000

	NvRegTxRingPhysAddr = 0x100,
	NvRegRxRingPhysAddr = 0x104,
	NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT 0
#define NVREG_RINGSZ_RXSHIFT 16
	NvRegTransmitPoll = 0x10c,
#define NVREG_TRANSMITPOLL_MAC_ADDR_REV	0x00008000
	NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE	0x10000
#define NVREG_LINKSPEED_10	1000
#define NVREG_LINKSPEED_100	100
#define NVREG_LINKSPEED_1000	50
#define NVREG_LINKSPEED_MASK	(0xFFF)
	NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31	(1<<31)
	NvRegTxWatermark = 0x13c,
#define NVREG_TX_WM_DESC1_DEFAULT	0x0200010
#define NVREG_TX_WM_DESC2_3_DEFAULT	0x1e08000
#define NVREG_TX_WM_DESC2_3_1000	0xfe08000
	NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK	0x0001
#define NVREG_TXRXCTL_BIT1	0x0002
#define NVREG_TXRXCTL_BIT2	0x0004
#define NVREG_TXRXCTL_IDLE	0x0008
#define NVREG_TXRXCTL_RESET	0x0010
#define NVREG_TXRXCTL_RXCHECK	0x0400
#define NVREG_TXRXCTL_DESC_1	0
#define NVREG_TXRXCTL_DESC_2	0x002100
#define NVREG_TXRXCTL_DESC_3	0xc02200
#define NVREG_TXRXCTL_VLANSTRIP	0x00040
#define NVREG_TXRXCTL_VLANINS	0x00080
	NvRegTxRingPhysAddrHigh = 0x148,
	NvRegRxRingPhysAddrHigh = 0x14C,
	NvRegTxPauseFrame = 0x170,
#define NVREG_TX_PAUSEFRAME_DISABLE	0x1ff0080
#define NVREG_TX_PAUSEFRAME_ENABLE	0x0c00030
	NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR		0x0001
#define NVREG_MIISTAT_LINKCHANGE	0x0008
#define NVREG_MIISTAT_MASK		0x000f
#define NVREG_MIISTAT_MASK2		0x000f
	NvRegMIIMask = 0x184,
#define NVREG_MII_LINKCHANGE		0x0008

	NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START	0x02
#define NVREG_ADAPTCTL_LINKUP	0x04
#define NVREG_ADAPTCTL_PHYVALID	0x40000
#define NVREG_ADAPTCTL_RUNNING	0x100000
#define NVREG_ADAPTCTL_PHYSHIFT	24
	NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8	(1<<8)
#define NVREG_MIIDELAY	5
	NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE	0x08000
#define NVREG_MIICTL_WRITE	0x00400
#define NVREG_MIICTL_ADDRSHIFT	5
	NvRegMIIData = 0x194,
	NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL		0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT	24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT	16
#define NVREG_WAKEUPFLAGS_D3SHIFT	12
#define NVREG_WAKEUPFLAGS_D2SHIFT	8
#define NVREG_WAKEUPFLAGS_D1SHIFT	4
#define NVREG_WAKEUPFLAGS_D0SHIFT	0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT		0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT	0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE	0x04
#define NVREG_WAKEUPFLAGS_ENABLE	0x1111

	NvRegPatternCRC = 0x204,
	NvRegPatternMask = 0x208,
	NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP	(1<<30)
#define NVREG_POWERCAP_D2SUPP	(1<<26)
#define NVREG_POWERCAP_D1SUPP	(1<<25)
	NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP	0x8000
#define NVREG_POWERSTATE_VALID		0x0100
#define NVREG_POWERSTATE_MASK		0x0003
#define NVREG_POWERSTATE_D0		0x0000
#define NVREG_POWERSTATE_D1		0x0001
#define NVREG_POWERSTATE_D2		0x0002
#define NVREG_POWERSTATE_D3		0x0003
	NvRegTxCnt = 0x280,
	NvRegTxZeroReXmt = 0x284,
	NvRegTxOneReXmt = 0x288,
	NvRegTxManyReXmt = 0x28c,
	NvRegTxLateCol = 0x290,
	NvRegTxUnderflow = 0x294,
	NvRegTxLossCarrier = 0x298,
	NvRegTxExcessDef = 0x29c,
	NvRegTxRetryErr = 0x2a0,
	NvRegRxFrameErr = 0x2a4,
	NvRegRxExtraByte = 0x2a8,
	NvRegRxLateCol = 0x2ac,
	NvRegRxRunt = 0x2b0,
	NvRegRxFrameTooLong = 0x2b4,
	NvRegRxOverflow = 0x2b8,
	NvRegRxFCSErr = 0x2bc,
	NvRegRxFrameAlignErr = 0x2c0,
	NvRegRxLenErr = 0x2c4,
	NvRegRxUnicast = 0x2c8,
	NvRegRxMulticast = 0x2cc,
	NvRegRxBroadcast = 0x2d0,
	NvRegTxDef = 0x2d4,
	NvRegTxFrame = 0x2d8,
	NvRegRxCnt = 0x2dc,
	NvRegTxPause = 0x2e0,
	NvRegRxPause = 0x2e4,
	NvRegRxDropFrame = 0x2e8,
	NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE	0x2000
	NvRegMSIXMap0 = 0x3e0,
	NvRegMSIXMap1 = 0x3e4,
	NvRegMSIXIrqStatus = 0x3f0,

	NvRegPowerState2 = 0x600,
#define NVREG_POWERSTATE2_POWERUP_MASK		0x0F11
#define NVREG_POWERSTATE2_POWERUP_REV_A3	0x0001
};

/* Big endian: should work, but is untested */
struct ring_desc {
	__le32 buf;
	__le32 flaglen;
};

struct ring_desc_ex {
	__le32 bufhigh;
	__le32 buflow;
	__le32 txvlan;
	__le32 flaglen;
};

union ring_type {
	struct ring_desc* orig;
	struct ring_desc_ex* ex;
};

#define FLAG_MASK_V1 0xffff0000
#define FLAG_MASK_V2 0xffffc000
#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)

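/*
 * Clarifying note: a descriptor's flaglen word packs status flags into the
 * high bits (the top 16 bits for v1 descriptors, the top 18 bits for v2/v3,
 * per the masks above) and the buffer length into the remaining low bits;
 * nv_descr_getlength() below applies LEN_MASK_V1/LEN_MASK_V2 accordingly.
 */
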
#define NV_TX_LASTPACKET	(1<<16)
#define NV_TX_RETRYERROR	(1<<19)
#define NV_TX_FORCED_INTERRUPT	(1<<24)
#define NV_TX_DEFERRED		(1<<26)
#define NV_TX_CARRIERLOST	(1<<27)
#define NV_TX_LATECOLLISION	(1<<28)
#define NV_TX_UNDERFLOW		(1<<29)
#define NV_TX_ERROR		(1<<30)
#define NV_TX_VALID		(1<<31)

#define NV_TX2_LASTPACKET	(1<<29)
#define NV_TX2_RETRYERROR	(1<<18)
#define NV_TX2_FORCED_INTERRUPT	(1<<30)
#define NV_TX2_DEFERRED		(1<<25)
#define NV_TX2_CARRIERLOST	(1<<26)
#define NV_TX2_LATECOLLISION	(1<<27)
#define NV_TX2_UNDERFLOW	(1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR		(1<<30)
#define NV_TX2_VALID		(1<<31)
#define NV_TX2_TSO		(1<<28)
#define NV_TX2_TSO_SHIFT	14
#define NV_TX2_TSO_MAX_SHIFT	14
#define NV_TX2_TSO_MAX_SIZE	(1<<NV_TX2_TSO_MAX_SHIFT)
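
/* For reference: NV_TX2_TSO_MAX_SIZE evaluates to 1<<14 = 16384 bytes,
 * the largest segment size the TSO path works with. */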
#define NV_TX2_CHECKSUM_L3	(1<<27)
#define NV_TX2_CHECKSUM_L4	(1<<26)

#define NV_TX3_VLAN_TAG_PRESENT (1<<18)

#define NV_RX_DESCRIPTORVALID	(1<<16)
#define NV_RX_MISSEDFRAME	(1<<17)
#define NV_RX_SUBSTRACT1	(1<<18)
#define NV_RX_ERROR1		(1<<23)
#define NV_RX_ERROR2		(1<<24)
#define NV_RX_ERROR3		(1<<25)
#define NV_RX_ERROR4		(1<<26)
#define NV_RX_CRCERR		(1<<27)
#define NV_RX_OVERFLOW		(1<<28)
#define NV_RX_FRAMINGERR	(1<<29)
#define NV_RX_ERROR		(1<<30)
#define NV_RX_AVAIL		(1<<31)

#define NV_RX2_CHECKSUMMASK	(0x1C000000)
#define NV_RX2_CHECKSUMOK1	(0x10000000)
#define NV_RX2_CHECKSUMOK2	(0x14000000)
#define NV_RX2_CHECKSUMOK3	(0x18000000)
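
/*
 * Hedged note: the hardware checksum result occupies bits 26-28 of the rx
 * flaglen word (mask 0x1C000000). A sketch of the receive-path test,
 * assuming the convention used by this driver's rx handler:
 *
 *	flags = le32_to_cpu(np->get_rx.orig->flaglen);
 *	if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 ||
 *	    (flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2 ||
 *	    (flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3)
 *		skb->ip_summed = CHECKSUM_UNNECESSARY;
 */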
#define NV_RX2_DESCRIPTORVALID	(1<<29)
#define NV_RX2_SUBSTRACT1	(1<<25)
#define NV_RX2_ERROR1		(1<<18)
#define NV_RX2_ERROR2		(1<<19)
#define NV_RX2_ERROR3		(1<<20)
#define NV_RX2_ERROR4		(1<<21)
#define NV_RX2_CRCERR		(1<<22)
#define NV_RX2_OVERFLOW		(1<<23)
#define NV_RX2_FRAMINGERR	(1<<24)
/* error and avail are the same for both */
#define NV_RX2_ERROR		(1<<30)
#define NV_RX2_AVAIL		(1<<31)

#define NV_RX3_VLAN_TAG_PRESENT (1<<16)
#define NV_RX3_VLAN_TAG_MASK	(0x0000FFFF)

/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ_VER1	0x270
#define NV_PCI_REGSZ_VER2	0x2d4
#define NV_PCI_REGSZ_VER3	0x604

/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY	4
#define NV_TXSTOP_DELAY1	10
#define NV_TXSTOP_DELAY1MAX	500000
#define NV_TXSTOP_DELAY2	100
#define NV_RXSTOP_DELAY1	10
#define NV_RXSTOP_DELAY1MAX	500000
#define NV_RXSTOP_DELAY2	100
#define NV_SETUP5_DELAY		5
#define NV_SETUP5_DELAYMAX	50000
#define NV_POWERUP_DELAY	5
#define NV_POWERUP_DELAYMAX	5000
#define NV_MIIBUSY_DELAY	50
#define NV_MIIPHY_DELAY		10
#define NV_MIIPHY_DELAYMAX	10000
#define NV_MAC_RESET_DELAY	64

#define NV_WAKEUPPATTERNS	5
#define NV_WAKEUPMASKENTRIES	4

/* General driver defaults */
#define NV_WATCHDOG_TIMEO	(5*HZ)

#define RX_RING_DEFAULT		128
#define TX_RING_DEFAULT		256
#define RX_RING_MIN		128
#define TX_RING_MIN		64
#define RING_MAX_DESC_VER_1	1024
#define RING_MAX_DESC_VER_2_3	16384

/* rx/tx mac addr + type + vlan + align + slack*/
#define NV_RX_HEADERS		(64)
/* even more slack. */
#define NV_RX_ALLOC_PAD		(64)

/* maximum mtu size */
#define NV_PKTLIMIT_1	ETH_DATA_LEN	/* hard limit not known */
#define NV_PKTLIMIT_2	9100	/* Actual limit according to NVidia: 9202 */

#define OOM_REFILL	(1+HZ/20)
#define POLL_WAIT	(1+HZ/100)
#define LINK_TIMEOUT	(3*HZ)
#define STATS_INTERVAL	(10*HZ)

/*
 * desc_ver values:
 * The nic supports three different descriptor types:
 * - DESC_VER_1: Original
 * - DESC_VER_2: support for jumbo frames.
 * - DESC_VER_3: 64-bit format.
 */
#define DESC_VER_1	1
#define DESC_VER_2	2
#define DESC_VER_3	3
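
/*
 * Note: throughout this file, DESC_VER_1 and DESC_VER_2 descriptors are
 * accessed through ring_type.orig (struct ring_desc), while DESC_VER_3
 * uses ring_type.ex (struct ring_desc_ex) to carry 64-bit buffer
 * addresses; see setup_hw_rings() and free_rings() below for the pattern.
 */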

/* PHY defines */
#define PHY_OUI_MARVELL	0x5043
#define PHY_OUI_CICADA	0x03f1
#define PHY_OUI_VITESSE	0x01c1
#define PHY_OUI_REALTEK	0x01c1
#define PHYID1_OUI_MASK	0x03ff
#define PHYID1_OUI_SHFT	6
#define PHYID2_OUI_MASK	0xfc00
#define PHYID2_OUI_SHFT	10
#define PHYID2_MODEL_MASK		0x03f0
#define PHY_MODEL_MARVELL_E3016		0x220
#define PHY_MARVELL_E3016_INITMASK	0x0300
#define PHY_CICADA_INIT1	0x0f000
#define PHY_CICADA_INIT2	0x0e00
#define PHY_CICADA_INIT3	0x01000
#define PHY_CICADA_INIT4	0x0200
#define PHY_CICADA_INIT5	0x0004
#define PHY_CICADA_INIT6	0x02000
#define PHY_VITESSE_INIT_REG1	0x1f
#define PHY_VITESSE_INIT_REG2	0x10
#define PHY_VITESSE_INIT_REG3	0x11
#define PHY_VITESSE_INIT_REG4	0x12
#define PHY_VITESSE_INIT_MSK1	0xc
#define PHY_VITESSE_INIT_MSK2	0x0180
#define PHY_VITESSE_INIT1	0x52b5
#define PHY_VITESSE_INIT2	0xaf8a
#define PHY_VITESSE_INIT3	0x8
#define PHY_VITESSE_INIT4	0x8f8a
#define PHY_VITESSE_INIT5	0xaf86
#define PHY_VITESSE_INIT6	0x8f86
#define PHY_VITESSE_INIT7	0xaf82
#define PHY_VITESSE_INIT8	0x0100
#define PHY_VITESSE_INIT9	0x8f82
#define PHY_VITESSE_INIT10	0x0
#define PHY_REALTEK_INIT_REG1	0x1f
#define PHY_REALTEK_INIT_REG2	0x19
#define PHY_REALTEK_INIT_REG3	0x13
#define PHY_REALTEK_INIT1	0x0000
#define PHY_REALTEK_INIT2	0x8e00
#define PHY_REALTEK_INIT3	0x0001
#define PHY_REALTEK_INIT4	0xad17

#define PHY_GIGABIT	0x0100

#define PHY_TIMEOUT	0x1
#define PHY_ERROR	0x2

#define PHY_100		0x1
#define PHY_1000	0x2
#define PHY_HALF	0x100

#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
#define NV_PAUSEFRAME_RX_ENABLE  0x0004
#define NV_PAUSEFRAME_TX_ENABLE  0x0008
#define NV_PAUSEFRAME_RX_REQ     0x0010
#define NV_PAUSEFRAME_TX_REQ     0x0020
#define NV_PAUSEFRAME_AUTONEG    0x0040

/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS  8
#define NV_MSI_X_VECTORS_MASK 0x000f
#define NV_MSI_CAPABLE	      0x0010
#define NV_MSI_X_CAPABLE      0x0020
#define NV_MSI_ENABLED        0x0040
#define NV_MSI_X_ENABLED      0x0080

#define NV_MSI_X_VECTOR_ALL   0x0
#define NV_MSI_X_VECTOR_RX    0x0
#define NV_MSI_X_VECTOR_TX    0x1
#define NV_MSI_X_VECTOR_OTHER 0x2

/* statistics */
struct nv_ethtool_str {
	char name[ETH_GSTRING_LEN];
};

static const struct nv_ethtool_str nv_estats_str[] = {
	{ "tx_bytes" },
	{ "tx_zero_rexmt" },
	{ "tx_one_rexmt" },
	{ "tx_many_rexmt" },
	{ "tx_late_collision" },
	{ "tx_fifo_errors" },
	{ "tx_carrier_errors" },
	{ "tx_excess_deferral" },
	{ "tx_retry_error" },
	{ "rx_frame_error" },
	{ "rx_extra_byte" },
	{ "rx_late_collision" },
	{ "rx_runt" },
	{ "rx_frame_too_long" },
	{ "rx_over_errors" },
	{ "rx_crc_errors" },
	{ "rx_frame_align_error" },
	{ "rx_length_error" },
	{ "rx_unicast" },
	{ "rx_multicast" },
	{ "rx_broadcast" },
	{ "rx_packets" },
	{ "rx_errors_total" },
	{ "tx_errors_total" },

	/* version 2 stats */
	{ "tx_deferral" },
	{ "tx_packets" },
	{ "rx_bytes" },
	{ "tx_pause" },
	{ "rx_pause" },
	{ "rx_drop_frame" }
};

struct nv_ethtool_stats {
	u64 tx_bytes;
	u64 tx_zero_rexmt;
	u64 tx_one_rexmt;
	u64 tx_many_rexmt;
	u64 tx_late_collision;
	u64 tx_fifo_errors;
	u64 tx_carrier_errors;
	u64 tx_excess_deferral;
	u64 tx_retry_error;
	u64 rx_frame_error;
	u64 rx_extra_byte;
	u64 rx_late_collision;
	u64 rx_runt;
	u64 rx_frame_too_long;
	u64 rx_over_errors;
	u64 rx_crc_errors;
	u64 rx_frame_align_error;
	u64 rx_length_error;
	u64 rx_unicast;
	u64 rx_multicast;
	u64 rx_broadcast;
	u64 rx_packets;
	u64 rx_errors_total;
	u64 tx_errors_total;

	/* version 2 stats */
	u64 tx_deferral;
	u64 tx_packets;
	u64 rx_bytes;
	u64 tx_pause;
	u64 rx_pause;
	u64 rx_drop_frame;
};

#define NV_DEV_STATISTICS_V2_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
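
/*
 * The count macros above derive directly from struct nv_ethtool_stats:
 * the v2 count is the total number of u64 fields, and the v1 count drops
 * the six fields in the trailing "version 2 stats" block (tx_deferral,
 * tx_packets, rx_bytes, tx_pause, rx_pause, rx_drop_frame).
 */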

/* diagnostics */
#define NV_TEST_COUNT_BASE 3
#define NV_TEST_COUNT_EXTENDED 4

static const struct nv_ethtool_str nv_etests_str[] = {
	{ "link (online/offline)" },
	{ "register (offline) " },
	{ "interrupt (offline) " },
	{ "loopback (offline) " }
};

struct register_test {
	__le32 reg;
	__le32 mask;
};

static const struct register_test nv_registers_test[] = {
	{ NvRegUnknownSetupReg6, 0x01 },
	{ NvRegMisc1, 0x03c },
	{ NvRegOffloadConfig, 0x03ff },
	{ NvRegMulticastAddrA, 0xffffffff },
	{ NvRegTxWatermark, 0x0ff },
	{ NvRegWakeUpFlags, 0x07777 },
	{ 0,0 }
};

struct nv_skb_map {
	struct sk_buff *skb;
	dma_addr_t dma;
	unsigned int dma_len;
};

/*
 * SMP locking:
 * All hardware access under dev->priv->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *	by the arch code for interrupts.
 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
 *	needs dev->priv->lock :-(
 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
 */
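
/*
 * A minimal sketch of the locking discipline described above, assuming a
 * non-fast-path update of shared driver state (the pattern nv_do_rx_refill()
 * below uses around its timer rearm):
 *
 *	spin_lock_irq(&np->lock);
 *	// ... touch hardware / shared driver state ...
 *	spin_unlock_irq(&np->lock);
 */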

/* in dev: base, irq */
struct fe_priv {
	spinlock_t lock;

	/* General data:
	 * Locking: spin_lock(&np->lock); */
	struct net_device_stats stats;
	struct nv_ethtool_stats estats;
	int in_shutdown;
	u32 linkspeed;
	int duplex;
	int autoneg;
	int fixed_mode;
	int phyaddr;
	int wolenabled;
	unsigned int phy_oui;
	unsigned int phy_model;
	u16 gigabit;
	int intr_test;
	int recover_error;

	/* General data: RO fields */
	dma_addr_t ring_addr;
	struct pci_dev *pci_dev;
	u32 orig_mac[2];
	u32 irqmask;
	u32 desc_ver;
	u32 txrxctl_bits;
	u32 vlanctl_bits;
	u32 driver_data;
	u32 register_size;
	int rx_csum;
	u32 mac_in_use;

	void __iomem *base;

	/* rx specific fields.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	union ring_type get_rx, put_rx, first_rx, last_rx;
	struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
	struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
	struct nv_skb_map *rx_skb;

	union ring_type rx_ring;
	unsigned int rx_buf_sz;
	unsigned int pkt_limit;
	struct timer_list oom_kick;
	struct timer_list nic_poll;
	struct timer_list stats_poll;
	u32 nic_poll_irq;
	int rx_ring_size;

	/* media detection workaround.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	int need_linktimer;
	unsigned long link_timeout;
	/*
	 * tx specific fields.
	 */
	union ring_type get_tx, put_tx, first_tx, last_tx;
	struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
	struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
	struct nv_skb_map *tx_skb;

	union ring_type tx_ring;
	u32 tx_flags;
	int tx_ring_size;
	int tx_stop;

	/* vlan fields */
	struct vlan_group *vlangrp;

	/* msi/msi-x fields */
	u32 msi_flags;
	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];

	/* flow control */
	u32 pause_flags;
};

/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work = 5;

/*
 * Optimization can be either throughput mode or cpu mode
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 */
enum {
	NV_OPTIMIZATION_MODE_THROUGHPUT,
	NV_OPTIMIZATION_MODE_CPU
};
static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;

/*
 * Poll interval for timer irq
 *
 * This interval determines how frequently an interrupt is generated.
 * The value is determined by [(time_in_micro_secs * 100) / (2^10)]
 * Min = 0, and Max = 65535
 */
static int poll_interval = -1;

/*
 * MSI interrupts
 */
enum {
	NV_MSI_INT_DISABLED,
	NV_MSI_INT_ENABLED
};
static int msi = NV_MSI_INT_ENABLED;

/*
 * MSIX interrupts
 */
enum {
	NV_MSIX_INT_DISABLED,
	NV_MSIX_INT_ENABLED
};
static int msix = NV_MSIX_INT_DISABLED;

/*
 * DMA 64bit
 */
enum {
	NV_DMA_64BIT_DISABLED,
	NV_DMA_64BIT_ENABLED
};
static int dma_64bit = NV_DMA_64BIT_ENABLED;

static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
	return ((struct fe_priv *)netdev_priv(dev))->base;
}

static inline void pci_push(u8 __iomem *base)
{
	/* force out pending posted writes */
	readl(base);
}

static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen)
		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}

static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
}

static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
				int delay, int delaymax, const char *msg)
{
	u8 __iomem *base = get_hwbase(dev);

	pci_push(base);
	do {
		udelay(delay);
		delaymax -= delay;
		if (delaymax < 0) {
			if (msg)
				printk(msg);
			return 1;
		}
	} while ((readl(base + offset) & mask) != target);
	return 0;
}
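
/*
 * Usage note: reg_delay() polls a register until (value & mask) == target,
 * giving up after delaymax microseconds. nv_stop_rx() below, for example,
 * waits for the receiver to go idle with:
 *
 *	reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
 *		  NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX, ...);
 */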

#define NV_SETUP_RX_RING 0x01
#define NV_SETUP_TX_RING 0x02

static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
		}
	} else {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
			writel((u32) (cpu_to_le64(np->ring_addr) >> 32), base + NvRegRxRingPhysAddrHigh);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
			writel((u32) (cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)) >> 32), base + NvRegTxRingPhysAddrHigh);
		}
	}
}
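
/*
 * Layout note (inferred from the address arithmetic above and the matching
 * pci_free_consistent() sizes in free_rings() below): both rings live in a
 * single DMA-coherent allocation, with the rx descriptors first and the tx
 * descriptors starting at ring_addr + rx_ring_size * sizeof(descriptor).
 */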

static void free_rings(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		if (np->rx_ring.orig)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.orig, np->ring_addr);
	} else {
		if (np->rx_ring.ex)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.ex, np->ring_addr);
	}
	if (np->rx_skb)
		kfree(np->rx_skb);
	if (np->tx_skb)
		kfree(np->tx_skb);
}

static int using_multi_irqs(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
		return 0;
	else
		return 1;
}
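
/*
 * In other words: using_multi_irqs() returns 0 both when MSI-X is not
 * enabled (legacy INTx or plain MSI, one shared interrupt) and when MSI-X
 * is enabled with only a single vector; it returns 1 only when separate
 * rx/tx/other vectors were allocated.
 */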

static void nv_enable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

static void nv_disable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

/* In MSIX mode, a write to irqmask behaves as XOR */
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
{
	u8 __iomem *base = get_hwbase(dev);

	writel(mask, base + NvRegIrqMask);
}

static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		writel(mask, base + NvRegIrqMask);
	} else {
		if (np->msi_flags & NV_MSI_ENABLED)
			writel(0, base + NvRegMSIIrqMask);
		writel(0, base + NvRegIrqMask);
	}
}

#define MII_READ	(-1)
/* mii_rw: read/write a register on the PHY.
 *
 * Caller must guarantee serialization
 */
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	int retval;

	writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);

	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	}

	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	}
	writel(reg, base + NvRegMIIControl);

	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
			NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
				dev->name, value, miireg, addr);
		retval = 0;
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else {
		retval = readl(base + NvRegMIIData);
		dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
				dev->name, miireg, addr, retval);
	}

	return retval;
}
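
/*
 * Usage: passing MII_READ as the value selects a read; any other value is
 * written to the PHY register. For example, phy_init() below fetches the
 * PHY's basic status with
 *
 *	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
 *
 * and a return of -1 signals a timeout or a failed read.
 */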

static int phy_reset(struct net_device *dev, u32 bmcr_setup)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 miicontrol;
	unsigned int tries = 0;

	miicontrol = BMCR_RESET | bmcr_setup;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
		return -1;
	}

	/* wait for 500ms */
	msleep(500);

	/* must wait till reset is deasserted */
	while (miicontrol & BMCR_RESET) {
		msleep(10);
		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		/* FIXME: 100 tries seem excessive */
		if (tries++ > 100)
			return -1;
	}
	return 0;
}

static int phy_init(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000, reg;

	/* phy errata for E3016 phy */
	if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
		reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		reg &= ~PHY_MARVELL_E3016_INITMASK;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
			printk(KERN_INFO "%s: phy write to errata reg failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_REALTEK) {
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}

	/* set advertise register */
	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
		printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* get phy interface type */
	phyinterface = readl(base + NvRegPhyInterface);

	/* see if gigabit phy */
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	if (mii_status & PHY_GIGABIT) {
		np->gigabit = PHY_GIGABIT;
		mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
		mii_control_1000 &= ~ADVERTISE_1000HALF;
		if (phyinterface & PHY_RGMII)
			mii_control_1000 |= ADVERTISE_1000FULL;
		else
			mii_control_1000 &= ~ADVERTISE_1000FULL;

		if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	else
		np->gigabit = 0;

	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= BMCR_ANENABLE;

	/* reset the phy
	 * (certain phys need bmcr to be setup with reset)
	 */
	if (phy_reset(dev, mii_control)) {
		printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* phy vendor specific configuration */
	if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII)) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
		phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
		phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		phy_reserved |= PHY_CICADA_INIT5;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_CICADA) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
		phy_reserved |= PHY_CICADA_INIT6;
		if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_VITESSE) {
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
		phy_reserved |= PHY_VITESSE_INIT3;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
		phy_reserved |= PHY_VITESSE_INIT3;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
		phy_reserved |= PHY_VITESSE_INIT8;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_REALTEK) {
		/* reset could have cleared these out, set them back */
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}

	/* some phys clear out pause advertisement on reset, set it back */
	mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);

	/* restart auto negotiation */
	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
		return PHY_ERROR;
	}

	return 0;
}

static void nv_start_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
	/* Already running? Stop it. */
	if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
		rx_ctrl &= ~NVREG_RCVCTL_START;
		writel(rx_ctrl, base + NvRegReceiverControl);
		pci_push(base);
	}
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);
	rx_ctrl |= NVREG_RCVCTL_START;
	if (np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
				dev->name, np->duplex, np->linkspeed);
	pci_push(base);
}

static void nv_stop_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
	if (!np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_START;
	else
		rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
			NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
			KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");

	udelay(NV_RXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(0, base + NvRegLinkSpeed);
}

static void nv_start_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
	tx_ctrl |= NVREG_XMITCTL_START;
	if (np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	pci_push(base);
}

static void nv_stop_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
	if (!np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_START;
	else
		tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
			NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
			KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");

	udelay(NV_TXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
		       base + NvRegTransmitPoll);
}

static void nv_txrx_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	udelay(NV_TXRX_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}

static void nv_mac_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(0, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}

static void nv_get_hw_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	np->estats.tx_bytes += readl(base + NvRegTxCnt);
	np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
	np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
	np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
	np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
	np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
	np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
	np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
	np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
	np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
	np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
	np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
	np->estats.rx_runt += readl(base + NvRegRxRunt);
	np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
	np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
	np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
	np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
	np->estats.rx_length_error += readl(base + NvRegRxLenErr);
	np->estats.rx_unicast += readl(base + NvRegRxUnicast);
	np->estats.rx_multicast += readl(base + NvRegRxMulticast);
	np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
	np->estats.rx_packets =
		np->estats.rx_unicast +
		np->estats.rx_multicast +
		np->estats.rx_broadcast;
	np->estats.rx_errors_total =
		np->estats.rx_crc_errors +
		np->estats.rx_over_errors +
		np->estats.rx_frame_error +
		(np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
		np->estats.rx_late_collision +
		np->estats.rx_runt +
		np->estats.rx_frame_too_long;
	np->estats.tx_errors_total =
		np->estats.tx_late_collision +
		np->estats.tx_fifo_errors +
		np->estats.tx_carrier_errors +
		np->estats.tx_excess_deferral +
		np->estats.tx_retry_error;

	if (np->driver_data & DEV_HAS_STATISTICS_V2) {
		np->estats.tx_deferral += readl(base + NvRegTxDef);
		np->estats.tx_packets += readl(base + NvRegTxFrame);
		np->estats.rx_bytes += readl(base + NvRegRxCnt);
		np->estats.tx_pause += readl(base + NvRegTxPause);
		np->estats.rx_pause += readl(base + NvRegRxPause);
		np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
	}
}

/*
 * nv_get_stats: dev->get_stats function
 * Get latest stats value from the nic.
 * Called with read_lock(&dev_base_lock) held for read -
 * only synchronized against unregister_netdevice.
 */
static struct net_device_stats *nv_get_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	/* If the nic supports hw counters then retrieve latest values */
	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) {
		nv_get_hw_stats(dev);

		/* copy to net_device stats */
		np->stats.tx_bytes = np->estats.tx_bytes;
		np->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
		np->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
		np->stats.rx_crc_errors = np->estats.rx_crc_errors;
		np->stats.rx_over_errors = np->estats.rx_over_errors;
		np->stats.rx_errors = np->estats.rx_errors_total;
		np->stats.tx_errors = np->estats.tx_errors_total;
	}
	return &np->stats;
}
1512
/*
 * nv_alloc_rx: fill rx ring entries.
 * Return 1 if the allocations for the skbs failed and the
 * rx engine is without available descriptors.
 */
static int nv_alloc_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc* less_rx;

	less_rx = np->get_rx.orig;
	if (less_rx-- == np->first_rx.orig)
		less_rx = np->last_rx.orig;

	while (np->put_rx.orig != less_rx) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
							     skb->data,
							     skb_tailroom(skb),
							     PCI_DMA_FROMDEVICE);
			np->put_rx_ctx->dma_len = skb_tailroom(skb);
			np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
			wmb();
			np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
			if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
				np->put_rx.orig = np->first_rx.orig;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
		} else {
			return 1;
		}
	}
	return 0;
}

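/*
 * Note on the refill loop above (and its _optimized twin below): the ring
 * is deliberately never filled completely.  less_rx starts at the reader
 * position (get_rx) and is stepped back one slot, so the writer (put_rx)
 * always stops one descriptor short of the reader.  Keeping that single
 * permanent gap is what lets "put == get" unambiguously mean "empty"
 * rather than "full".
 */
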
static int nv_alloc_rx_optimized(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc_ex* less_rx;

	less_rx = np->get_rx.ex;
	if (less_rx-- == np->first_rx.ex)
		less_rx = np->last_rx.ex;

	while (np->put_rx.ex != less_rx) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
							     skb->data,
							     skb_tailroom(skb),
							     PCI_DMA_FROMDEVICE);
			np->put_rx_ctx->dma_len = skb_tailroom(skb);
			np->put_rx.ex->bufhigh = cpu_to_le64(np->put_rx_ctx->dma) >> 32;
			np->put_rx.ex->buflow = cpu_to_le64(np->put_rx_ctx->dma) & 0x0FFFFFFFF;
			wmb();
			np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
			if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
				np->put_rx.ex = np->first_rx.ex;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
		} else {
			return 1;
		}
	}
	return 0;
}

/* If rx buffers are exhausted, called after 50ms to attempt a refill */
#ifdef CONFIG_FORCEDETH_NAPI
static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;

	/* Just reschedule NAPI rx processing */
	netif_rx_schedule(dev);
}
#else
static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	int retcode;

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	}
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		retcode = nv_alloc_rx(dev);
	else
		retcode = nv_alloc_rx_optimized(dev);
	if (retcode) {
		spin_lock_irq(&np->lock);
		if (!np->in_shutdown)
			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		spin_unlock_irq(&np->lock);
	}
	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	}
}
#endif

static void nv_init_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
	else
		np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
	np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
	np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];

	for (i = 0; i < np->rx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->rx_ring.orig[i].flaglen = 0;
			np->rx_ring.orig[i].buf = 0;
		} else {
			np->rx_ring.ex[i].flaglen = 0;
			np->rx_ring.ex[i].txvlan = 0;
			np->rx_ring.ex[i].bufhigh = 0;
			np->rx_ring.ex[i].buflow = 0;
		}
		np->rx_skb[i].skb = NULL;
		np->rx_skb[i].dma = 0;
	}
}

static void nv_init_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->get_tx = np->put_tx = np->first_tx = np->tx_ring;
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
	else
		np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
	np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
	np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];

	for (i = 0; i < np->tx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->tx_ring.orig[i].flaglen = 0;
			np->tx_ring.orig[i].buf = 0;
		} else {
			np->tx_ring.ex[i].flaglen = 0;
			np->tx_ring.ex[i].txvlan = 0;
			np->tx_ring.ex[i].bufhigh = 0;
			np->tx_ring.ex[i].buflow = 0;
		}
		np->tx_skb[i].skb = NULL;
		np->tx_skb[i].dma = 0;
	}
}

static int nv_init_ring(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	nv_init_tx(dev);
	nv_init_rx(dev);
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		return nv_alloc_rx(dev);
	else
		return nv_alloc_rx_optimized(dev);
}

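/*
 * Note: the desc_ver checks above recur throughout the driver.  DESC_VER_1
 * and DESC_VER_2 use the legacy 32-bit ring_desc layout and the plain code
 * paths; anything newer uses ring_desc_ex (64-bit buffer address split into
 * two halves, plus a vlan word) and the *_optimized paths.
 */
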
static int nv_release_txskb(struct net_device *dev, struct nv_skb_map* tx_skb)
{
	struct fe_priv *np = netdev_priv(dev);

	if (tx_skb->dma) {
		pci_unmap_page(np->pci_dev, tx_skb->dma,
			       tx_skb->dma_len,
			       PCI_DMA_TODEVICE);
		tx_skb->dma = 0;
	}
	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
		return 1;
	} else {
		return 0;
	}
}

static void nv_drain_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	unsigned int i;

	for (i = 0; i < np->tx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->tx_ring.orig[i].flaglen = 0;
			np->tx_ring.orig[i].buf = 0;
		} else {
			np->tx_ring.ex[i].flaglen = 0;
			np->tx_ring.ex[i].txvlan = 0;
			np->tx_ring.ex[i].bufhigh = 0;
			np->tx_ring.ex[i].buflow = 0;
		}
		if (nv_release_txskb(dev, &np->tx_skb[i]))
			np->stats.tx_dropped++;
	}
}

static void nv_drain_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	for (i = 0; i < np->rx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->rx_ring.orig[i].flaglen = 0;
			np->rx_ring.orig[i].buf = 0;
		} else {
			np->rx_ring.ex[i].flaglen = 0;
			np->rx_ring.ex[i].txvlan = 0;
			np->rx_ring.ex[i].bufhigh = 0;
			np->rx_ring.ex[i].buflow = 0;
		}
		wmb();
		if (np->rx_skb[i].skb) {
			pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
					 (skb_end_pointer(np->rx_skb[i].skb) -
					  np->rx_skb[i].skb->data),
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skb[i].skb);
			np->rx_skb[i].skb = NULL;
		}
	}
}

static void drain_ring(struct net_device *dev)
{
	nv_drain_tx(dev);
	nv_drain_rx(dev);
}

static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
{
	return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
}

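/*
 * Worked example for the arithmetic above, assuming tx_ring_size = 256:
 * with put_tx_ctx 10 slots ahead of get_tx_ctx the pointer difference is
 * 10, so (256 + 10) % 256 = 10 slots in flight and 246 empty.  Once the
 * writer has wrapped, the difference goes negative, e.g. -250, giving
 * (256 - 250) % 256 = 6 in flight and 250 empty.  Adding tx_ring_size
 * before the modulo keeps the intermediate value non-negative.
 */
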
/*
 * nv_start_xmit: dev->hard_start_xmit function
 * Called with netif_tx_lock held.
 */
static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 tx_flags = 0;
	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
	unsigned int fragments = skb_shinfo(skb)->nr_frags;
	unsigned int i;
	u32 offset = 0;
	u32 bcnt;
	u32 size = skb->len-skb->data_len;
	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	u32 empty_slots;
	struct ring_desc* put_tx;
	struct ring_desc* start_tx;
	struct ring_desc* prev_tx;
	struct nv_skb_map* prev_tx_ctx;

	/* add fragments to entries count */
	for (i = 0; i < fragments; i++) {
		entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	}

	empty_slots = nv_get_empty_tx_slots(np);
	if (unlikely(empty_slots <= entries)) {
		spin_lock_irq(&np->lock);
		netif_stop_queue(dev);
		np->tx_stop = 1;
		spin_unlock_irq(&np->lock);
		return NETDEV_TX_BUSY;
	}

	start_tx = put_tx = np->put_tx.orig;

	/* setup the header buffer */
	do {
		prev_tx = put_tx;
		prev_tx_ctx = np->put_tx_ctx;
		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
						     PCI_DMA_TODEVICE);
		np->put_tx_ctx->dma_len = bcnt;
		put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

		tx_flags = np->tx_flags;
		offset += bcnt;
		size -= bcnt;
		if (unlikely(put_tx++ == np->last_tx.orig))
			put_tx = np->first_tx.orig;
		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
			np->put_tx_ctx = np->first_tx_ctx;
	} while (size);

	/* setup the fragments */
	for (i = 0; i < fragments; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 size = frag->size;
		offset = 0;

		do {
			prev_tx = put_tx;
			prev_tx_ctx = np->put_tx_ctx;
			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
			np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
							   PCI_DMA_TODEVICE);
			np->put_tx_ctx->dma_len = bcnt;
			put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

			offset += bcnt;
			size -= bcnt;
			if (unlikely(put_tx++ == np->last_tx.orig))
				put_tx = np->first_tx.orig;
			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
				np->put_tx_ctx = np->first_tx_ctx;
		} while (size);
	}

	/* set last fragment flag */
	prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);

	/* save skb in this slot's context area */
	prev_tx_ctx->skb = skb;

	if (skb_is_gso(skb))
		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
	else
		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
			 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;

	spin_lock_irq(&np->lock);

	/* set tx flags */
	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
	np->put_tx.orig = put_tx;

	spin_unlock_irq(&np->lock);

	dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n",
		dev->name, entries, tx_flags_extra);
	{
		int j;
		for (j=0; j<64; j++) {
			if ((j%16) == 0)
				dprintk("\n%03x:", j);
			dprintk(" %02x", ((unsigned char*)skb->data)[j]);
		}
		dprintk("\n");
	}

	dev->trans_start = jiffies;
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	return NETDEV_TX_OK;
}

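/*
 * Illustration of the entries estimate in nv_start_xmit: every linear or
 * fragment buffer is carved into chunks of at most NV_TX2_TSO_MAX_SIZE
 * bytes, one descriptor per chunk.  Assuming NV_TX2_TSO_MAX_SHIFT is 14
 * (16K chunks, as the names suggest), a 20000-byte linear area needs
 * (20000 >> 14) + 1 = 2 descriptors; the "+1 if any remainder" term is
 * the rounding-up half of that division.
 */
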
static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 tx_flags = 0;
	u32 tx_flags_extra;
	unsigned int fragments = skb_shinfo(skb)->nr_frags;
	unsigned int i;
	u32 offset = 0;
	u32 bcnt;
	u32 size = skb->len-skb->data_len;
	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	u32 empty_slots;
	struct ring_desc_ex* put_tx;
	struct ring_desc_ex* start_tx;
	struct ring_desc_ex* prev_tx;
	struct nv_skb_map* prev_tx_ctx;

	/* add fragments to entries count */
	for (i = 0; i < fragments; i++) {
		entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	}

	empty_slots = nv_get_empty_tx_slots(np);
	if (unlikely(empty_slots <= entries)) {
		spin_lock_irq(&np->lock);
		netif_stop_queue(dev);
		np->tx_stop = 1;
		spin_unlock_irq(&np->lock);
		return NETDEV_TX_BUSY;
	}

	start_tx = put_tx = np->put_tx.ex;

	/* setup the header buffer */
	do {
		prev_tx = put_tx;
		prev_tx_ctx = np->put_tx_ctx;
		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
						     PCI_DMA_TODEVICE);
		np->put_tx_ctx->dma_len = bcnt;
		put_tx->bufhigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32;
		put_tx->buflow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF;
		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

		tx_flags = NV_TX2_VALID;
		offset += bcnt;
		size -= bcnt;
		if (unlikely(put_tx++ == np->last_tx.ex))
			put_tx = np->first_tx.ex;
		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
			np->put_tx_ctx = np->first_tx_ctx;
	} while (size);

	/* setup the fragments */
	for (i = 0; i < fragments; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 size = frag->size;
		offset = 0;

		do {
			prev_tx = put_tx;
			prev_tx_ctx = np->put_tx_ctx;
			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
			np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
							   PCI_DMA_TODEVICE);
			np->put_tx_ctx->dma_len = bcnt;
			put_tx->bufhigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32;
			put_tx->buflow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF;
			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

			offset += bcnt;
			size -= bcnt;
			if (unlikely(put_tx++ == np->last_tx.ex))
				put_tx = np->first_tx.ex;
			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
				np->put_tx_ctx = np->first_tx_ctx;
		} while (size);
	}

	/* set last fragment flag */
	prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);

	/* save skb in this slot's context area */
	prev_tx_ctx->skb = skb;

	if (skb_is_gso(skb))
		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
	else
		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
			 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;

	/* vlan tag */
	if (likely(!np->vlangrp)) {
		start_tx->txvlan = 0;
	} else {
		if (vlan_tx_tag_present(skb))
			start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb));
		else
			start_tx->txvlan = 0;
	}

	spin_lock_irq(&np->lock);

	/* set tx flags */
	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
	np->put_tx.ex = put_tx;

	spin_unlock_irq(&np->lock);

	dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. tx_flags_extra: %x\n",
		dev->name, entries, tx_flags_extra);
	{
		int j;
		for (j=0; j<64; j++) {
			if ((j%16) == 0)
				dprintk("\n%03x:", j);
			dprintk(" %02x", ((unsigned char*)skb->data)[j]);
		}
		dprintk("\n");
	}

	dev->trans_start = jiffies;
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	return NETDEV_TX_OK;
}

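/*
 * Note on the bufhigh/buflow split above: ring_desc_ex carries a 64-bit
 * DMA address as two 32-bit halves, e.g. dma = 0x123456789 is stored as
 * bufhigh = 0x1 and buflow = 0x23456789.  Strictly speaking, shifting and
 * masking the cpu_to_le64() result is only byte-order-correct on a
 * little-endian host; presumably that covers the platforms this hardware
 * ships in, since the descriptors themselves are little-endian.
 */
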
/*
 * nv_tx_done: check for completed packets, release the skbs.
 *
 * Caller must own np->lock.
 */
static void nv_tx_done(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	struct ring_desc* orig_get_tx = np->get_tx.orig;

	while ((np->get_tx.orig != np->put_tx.orig) &&
	       !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID)) {

		dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n",
			dev->name, flags);

		pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
			       np->get_tx_ctx->dma_len,
			       PCI_DMA_TODEVICE);
		np->get_tx_ctx->dma = 0;

		if (np->desc_ver == DESC_VER_1) {
			if (flags & NV_TX_LASTPACKET) {
				if (flags & NV_TX_ERROR) {
					if (flags & NV_TX_UNDERFLOW)
						np->stats.tx_fifo_errors++;
					if (flags & NV_TX_CARRIERLOST)
						np->stats.tx_carrier_errors++;
					np->stats.tx_errors++;
				} else {
					np->stats.tx_packets++;
					np->stats.tx_bytes += np->get_tx_ctx->skb->len;
				}
				dev_kfree_skb_any(np->get_tx_ctx->skb);
				np->get_tx_ctx->skb = NULL;
			}
		} else {
			if (flags & NV_TX2_LASTPACKET) {
				if (flags & NV_TX2_ERROR) {
					if (flags & NV_TX2_UNDERFLOW)
						np->stats.tx_fifo_errors++;
					if (flags & NV_TX2_CARRIERLOST)
						np->stats.tx_carrier_errors++;
					np->stats.tx_errors++;
				} else {
					np->stats.tx_packets++;
					np->stats.tx_bytes += np->get_tx_ctx->skb->len;
				}
				dev_kfree_skb_any(np->get_tx_ctx->skb);
				np->get_tx_ctx->skb = NULL;
			}
		}
		if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
			np->get_tx.orig = np->first_tx.orig;
		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
			np->get_tx_ctx = np->first_tx_ctx;
	}
	if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
		np->tx_stop = 0;
		netif_wake_queue(dev);
	}
}

static void nv_tx_done_optimized(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	struct ring_desc_ex* orig_get_tx = np->get_tx.ex;

	while ((np->get_tx.ex != np->put_tx.ex) &&
	       !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX_VALID) &&
	       (limit-- > 0)) {

		dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n",
			dev->name, flags);

		pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
			       np->get_tx_ctx->dma_len,
			       PCI_DMA_TODEVICE);
		np->get_tx_ctx->dma = 0;

		if (flags & NV_TX2_LASTPACKET) {
			if (!(flags & NV_TX2_ERROR))
				np->stats.tx_packets++;
			dev_kfree_skb_any(np->get_tx_ctx->skb);
			np->get_tx_ctx->skb = NULL;
		}
		if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
			np->get_tx.ex = np->first_tx.ex;
		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
			np->get_tx_ctx = np->first_tx_ctx;
	}
	if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
		np->tx_stop = 0;
		netif_wake_queue(dev);
	}
}

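/*
 * Note: unlike nv_tx_done, the optimized variant takes an explicit work
 * limit so that its interrupt-context callers (nv_nic_irq_optimized and
 * nv_nic_irq_tx below pass TX_WORK_PER_LOOP) can bound the time spent
 * reaping completions with np->lock held, while nv_tx_timeout passes the
 * full ring size to flush everything.
 */
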
/*
 * nv_tx_timeout: dev->tx_timeout function
 * Called with netif_tx_lock held.
 */
static void nv_tx_timeout(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 status;

	if (np->msi_flags & NV_MSI_X_ENABLED)
		status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
	else
		status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;

	printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status);

	{
		int i;

		printk(KERN_INFO "%s: Ring at %lx\n",
		       dev->name, (unsigned long)np->ring_addr);
		printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
		for (i=0;i<=np->register_size;i+= 32) {
			printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
			       i,
			       readl(base + i + 0), readl(base + i + 4),
			       readl(base + i + 8), readl(base + i + 12),
			       readl(base + i + 16), readl(base + i + 20),
			       readl(base + i + 24), readl(base + i + 28));
		}
		printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
		for (i=0;i<np->tx_ring_size;i+= 4) {
			if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
				printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
				       i,
				       le32_to_cpu(np->tx_ring.orig[i].buf),
				       le32_to_cpu(np->tx_ring.orig[i].flaglen),
				       le32_to_cpu(np->tx_ring.orig[i+1].buf),
				       le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
				       le32_to_cpu(np->tx_ring.orig[i+2].buf),
				       le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
				       le32_to_cpu(np->tx_ring.orig[i+3].buf),
				       le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
			} else {
				printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
				       i,
				       le32_to_cpu(np->tx_ring.ex[i].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i].buflow),
				       le32_to_cpu(np->tx_ring.ex[i].flaglen),
				       le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i+1].buflow),
				       le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
				       le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i+2].buflow),
				       le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
				       le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i+3].buflow),
				       le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
			}
		}
	}

	spin_lock_irq(&np->lock);

	/* 1) stop tx engine */
	nv_stop_tx(dev);

	/* 2) check that the packets were not sent already: */
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		nv_tx_done(dev);
	else
		nv_tx_done_optimized(dev, np->tx_ring_size);

	/* 3) if there are dead entries: clear everything */
	if (np->get_tx_ctx != np->put_tx_ctx) {
		printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
		nv_drain_tx(dev);
		nv_init_tx(dev);
		setup_hw_rings(dev, NV_SETUP_TX_RING);
	}

	netif_wake_queue(dev);

	/* 4) restart tx engine */
	nv_start_tx(dev);
	spin_unlock_irq(&np->lock);
}

/*
 * Called when the nic notices a mismatch between the actual data len on the
 * wire and the len indicated in the 802 header
 */
static int nv_getlen(struct net_device *dev, void *packet, int datalen)
{
	int hdrlen;	/* length of the 802 header */
	int protolen;	/* length as stored in the proto field */

	/* 1) calculate len according to header */
	if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
		protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto );
		hdrlen = VLAN_HLEN;
	} else {
		protolen = ntohs( ((struct ethhdr *)packet)->h_proto);
		hdrlen = ETH_HLEN;
	}
	dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
		dev->name, datalen, protolen, hdrlen);
	if (protolen > ETH_DATA_LEN)
		return datalen; /* Value in proto field not a len, no checks possible */

	protolen += hdrlen;
	/* consistency checks: */
	if (datalen > ETH_ZLEN) {
		if (datalen >= protolen) {
			/* more data on wire than in 802 header, trim off
			 * the additional data.
			 */
			dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
				dev->name, protolen);
			return protolen;
		} else {
			/* less data on wire than mentioned in header.
			 * Discard the packet.
			 */
			dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n",
				dev->name);
			return -1;
		}
	} else {
		/* short packet. Accept only if 802 values are also short */
		if (protolen > ETH_ZLEN) {
			dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n",
				dev->name);
			return -1;
		}
		dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
			dev->name, datalen);
		return datalen;
	}
}

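/*
 * Worked example for nv_getlen: a frame arrives with datalen 80 and an
 * EtherType/length field of 60.  Since 60 <= ETH_DATA_LEN it is treated
 * as an 802.3 length, protolen becomes 60 + ETH_HLEN = 74, and because
 * datalen >= protolen the six trailing bytes are trimmed (return 74).
 * Had the field held e.g. 0x0800 (IPv4) it would be an EtherType, no
 * length check is possible, and datalen is returned unchanged.
 */
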
static int nv_rx_process(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	u32 rx_processed_cnt = 0;
	struct sk_buff *skb;
	int len;

	while ((np->get_rx.orig != np->put_rx.orig) &&
	       !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
	       (rx_processed_cnt++ < limit)) {

		dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n",
			dev->name, flags);

		/*
		 * the packet is for us - immediately tear down the pci mapping.
		 * TODO: check if a prefetch of the first cacheline improves
		 * the performance.
		 */
		pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
				 np->get_rx_ctx->dma_len,
				 PCI_DMA_FROMDEVICE);
		skb = np->get_rx_ctx->skb;
		np->get_rx_ctx->skb = NULL;

		{
			int j;
			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
			for (j=0; j<64; j++) {
				if ((j%16) == 0)
					dprintk("\n%03x:", j);
				dprintk(" %02x", ((unsigned char*)skb->data)[j]);
			}
			dprintk("\n");
		}
		/* look at what we actually got: */
		if (np->desc_ver == DESC_VER_1) {
			if (likely(flags & NV_RX_DESCRIPTORVALID)) {
				len = flags & LEN_MASK_V1;
				if (unlikely(flags & NV_RX_ERROR)) {
					if (flags & NV_RX_ERROR4) {
						len = nv_getlen(dev, skb->data, len);
						if (len < 0) {
							np->stats.rx_errors++;
							dev_kfree_skb(skb);
							goto next_pkt;
						}
					}
					/* framing errors are soft errors */
					else if (flags & NV_RX_FRAMINGERR) {
						if (flags & NV_RX_SUBSTRACT1) {
							len--;
						}
					}
					/* the rest are hard errors */
					else {
						if (flags & NV_RX_MISSEDFRAME)
							np->stats.rx_missed_errors++;
						if (flags & NV_RX_CRCERR)
							np->stats.rx_crc_errors++;
						if (flags & NV_RX_OVERFLOW)
							np->stats.rx_over_errors++;
						np->stats.rx_errors++;
						dev_kfree_skb(skb);
						goto next_pkt;
					}
				}
			} else {
				dev_kfree_skb(skb);
				goto next_pkt;
			}
		} else {
			if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
				len = flags & LEN_MASK_V2;
				if (unlikely(flags & NV_RX2_ERROR)) {
					if (flags & NV_RX2_ERROR4) {
						len = nv_getlen(dev, skb->data, len);
						if (len < 0) {
							np->stats.rx_errors++;
							dev_kfree_skb(skb);
							goto next_pkt;
						}
					}
					/* framing errors are soft errors */
					else if (flags & NV_RX2_FRAMINGERR) {
						if (flags & NV_RX2_SUBSTRACT1) {
							len--;
						}
					}
					/* the rest are hard errors */
					else {
						if (flags & NV_RX2_CRCERR)
							np->stats.rx_crc_errors++;
						if (flags & NV_RX2_OVERFLOW)
							np->stats.rx_over_errors++;
						np->stats.rx_errors++;
						dev_kfree_skb(skb);
						goto next_pkt;
					}
				}
				if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2)/*ip and tcp */ {
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				} else {
					if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 ||
					    (flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) {
						skb->ip_summed = CHECKSUM_UNNECESSARY;
					}
				}
			} else {
				dev_kfree_skb(skb);
				goto next_pkt;
			}
		}
		/* got a valid packet - forward it to the network core */
		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, dev);
		dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
			dev->name, len, skb->protocol);
#ifdef CONFIG_FORCEDETH_NAPI
		netif_receive_skb(skb);
#else
		netif_rx(skb);
#endif
		dev->last_rx = jiffies;
		np->stats.rx_packets++;
		np->stats.rx_bytes += len;
next_pkt:
		if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
			np->get_rx.orig = np->first_rx.orig;
		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
			np->get_rx_ctx = np->first_rx_ctx;
	}

	return rx_processed_cnt;
}

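/*
 * Note on the checksum decoding above: NV_RX2_CHECKSUMMASK selects the
 * hardware's verdict field from the descriptor flags.  Assuming the flag
 * names mean what they suggest, CHECKSUMOK2 marks a verified IP+TCP frame
 * while CHECKSUMOK1/3 cover the remaining "checksum verified" encodings;
 * all of them let the stack skip its own check via CHECKSUM_UNNECESSARY,
 * and any other value leaves ip_summed at its default so the stack
 * verifies the packet itself.
 */
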
static int nv_rx_process_optimized(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	u32 vlanflags = 0;
	u32 rx_processed_cnt = 0;
	struct sk_buff *skb;
	int len;

	while ((np->get_rx.ex != np->put_rx.ex) &&
	       !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
	       (rx_processed_cnt++ < limit)) {

		dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: flags 0x%x.\n",
			dev->name, flags);

		/*
		 * the packet is for us - immediately tear down the pci mapping.
		 * TODO: check if a prefetch of the first cacheline improves
		 * the performance.
		 */
		pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
				 np->get_rx_ctx->dma_len,
				 PCI_DMA_FROMDEVICE);
		skb = np->get_rx_ctx->skb;
		np->get_rx_ctx->skb = NULL;

		{
			int j;
			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
			for (j=0; j<64; j++) {
				if ((j%16) == 0)
					dprintk("\n%03x:", j);
				dprintk(" %02x", ((unsigned char*)skb->data)[j]);
			}
			dprintk("\n");
		}
		/* look at what we actually got: */
		if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
			len = flags & LEN_MASK_V2;
			if (unlikely(flags & NV_RX2_ERROR)) {
				if (flags & NV_RX2_ERROR4) {
					len = nv_getlen(dev, skb->data, len);
					if (len < 0) {
						dev_kfree_skb(skb);
						goto next_pkt;
					}
				}
				/* framing errors are soft errors */
				else if (flags & NV_RX2_FRAMINGERR) {
					if (flags & NV_RX2_SUBSTRACT1) {
						len--;
					}
				}
				/* the rest are hard errors */
				else {
					dev_kfree_skb(skb);
					goto next_pkt;
				}
			}

			if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2)/*ip and tcp */ {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 ||
				    (flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) {
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				}
			}

			/* got a valid packet - forward it to the network core */
			skb_put(skb, len);
			skb->protocol = eth_type_trans(skb, dev);
			prefetch(skb->data);

			dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: %d bytes, proto %d accepted.\n",
				dev->name, len, skb->protocol);

			if (likely(!np->vlangrp)) {
#ifdef CONFIG_FORCEDETH_NAPI
				netif_receive_skb(skb);
#else
				netif_rx(skb);
#endif
			} else {
				vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
				if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
#ifdef CONFIG_FORCEDETH_NAPI
					vlan_hwaccel_receive_skb(skb, np->vlangrp,
								 vlanflags & NV_RX3_VLAN_TAG_MASK);
#else
					vlan_hwaccel_rx(skb, np->vlangrp,
							vlanflags & NV_RX3_VLAN_TAG_MASK);
#endif
				} else {
#ifdef CONFIG_FORCEDETH_NAPI
					netif_receive_skb(skb);
#else
					netif_rx(skb);
#endif
				}
			}

			dev->last_rx = jiffies;
			np->stats.rx_packets++;
			np->stats.rx_bytes += len;
		} else {
			dev_kfree_skb(skb);
		}
next_pkt:
		if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
			np->get_rx.ex = np->first_rx.ex;
		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
			np->get_rx_ctx = np->first_rx_ctx;
	}

	return rx_processed_cnt;
}

static void set_bufsize(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	if (dev->mtu <= ETH_DATA_LEN)
		np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
	else
		np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
}

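/*
 * Example: with the default MTU of 1500 (ETH_DATA_LEN) the rx buffer size
 * is 1500 + NV_RX_HEADERS, and a jumbo MTU of 9000 yields 9000 +
 * NV_RX_HEADERS.  The NV_RX_HEADERS slack leaves room for the link-layer
 * header on top of the MTU-sized payload.
 */
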
/*
 * nv_change_mtu: dev->change_mtu function
 * Called with dev_base_lock held for read.
 */
static int nv_change_mtu(struct net_device *dev, int new_mtu)
{
	struct fe_priv *np = netdev_priv(dev);
	int old_mtu;

	if (new_mtu < 64 || new_mtu > np->pkt_limit)
		return -EINVAL;

	old_mtu = dev->mtu;
	dev->mtu = new_mtu;

	/* return early if the buffer sizes will not change */
	if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
		return 0;
	if (old_mtu == new_mtu)
		return 0;

	/* synchronized against open : rtnl_lock() held by caller */
	if (netif_running(dev)) {
		u8 __iomem *base = get_hwbase(dev);
		/*
		 * It seems that the nic preloads valid ring entries into an
		 * internal buffer. The procedure for flushing everything is
		 * guessed, there is probably a simpler approach.
		 * Changing the MTU is a rare event, it shouldn't matter.
		 */
		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rx(dev);
		nv_stop_tx(dev);
		nv_txrx_reset(dev);
		/* drain the rx and tx queues */
		nv_drain_rx(dev);
		nv_drain_tx(dev);
		/* reinit driver view of the rx queue */
		set_bufsize(dev);
		if (nv_init_ring(dev)) {
			if (!np->in_shutdown)
				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		}
		/* reinit nic view of the rx queue */
		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
		writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
			base + NvRegRingSizes);
		pci_push(base);
		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
		pci_push(base);

		/* restart rx and tx engines */
		nv_start_rx(dev);
		nv_start_tx(dev);
		spin_unlock(&np->lock);
		netif_tx_unlock_bh(dev);
		nv_enable_irq(dev);
	}
	return 0;
}

static void nv_copy_mac_to_hw(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 mac[2];

	mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
		 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
	mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);

	writel(mac[0], base + NvRegMacAddrA);
	writel(mac[1], base + NvRegMacAddrB);
}

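/*
 * Packing example: for the MAC address 00:11:22:33:44:55 the code above
 * writes mac[0] = 0x33221100 to NvRegMacAddrA and mac[1] = 0x00005544 to
 * NvRegMacAddrB, i.e. the address bytes land in the registers in
 * little-endian order.
 */
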
/*
 * nv_set_mac_address: dev->set_mac_address function
 * Called with rtnl_lock() held.
 */
static int nv_set_mac_address(struct net_device *dev, void *addr)
{
	struct fe_priv *np = netdev_priv(dev);
	struct sockaddr *macaddr = (struct sockaddr*)addr;

	if (!is_valid_ether_addr(macaddr->sa_data))
		return -EADDRNOTAVAIL;

	/* synchronized against open : rtnl_lock() held by caller */
	memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);

	if (netif_running(dev)) {
		netif_tx_lock_bh(dev);
		spin_lock_irq(&np->lock);

		/* stop rx engine */
		nv_stop_rx(dev);

		/* set mac address */
		nv_copy_mac_to_hw(dev);

		/* restart rx engine */
		nv_start_rx(dev);
		spin_unlock_irq(&np->lock);
		netif_tx_unlock_bh(dev);
	} else {
		nv_copy_mac_to_hw(dev);
	}
	return 0;
}

/*
 * nv_set_multicast: dev->set_multicast function
 * Called with netif_tx_lock held.
 */
static void nv_set_multicast(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 addr[2];
	u32 mask[2];
	u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;

	memset(addr, 0, sizeof(addr));
	memset(mask, 0, sizeof(mask));

	if (dev->flags & IFF_PROMISC) {
		pff |= NVREG_PFF_PROMISC;
	} else {
		pff |= NVREG_PFF_MYADDR;

		if (dev->flags & IFF_ALLMULTI || dev->mc_list) {
			u32 alwaysOff[2];
			u32 alwaysOn[2];

			alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
			if (dev->flags & IFF_ALLMULTI) {
				alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
			} else {
				struct dev_mc_list *walk;

				walk = dev->mc_list;
				while (walk != NULL) {
					u32 a, b;
					a = le32_to_cpu(*(u32 *) walk->dmi_addr);
					b = le16_to_cpu(*(u16 *) (&walk->dmi_addr[4]));
					alwaysOn[0] &= a;
					alwaysOff[0] &= ~a;
					alwaysOn[1] &= b;
					alwaysOff[1] &= ~b;
					walk = walk->next;
				}
			}
			addr[0] = alwaysOn[0];
			addr[1] = alwaysOn[1];
			mask[0] = alwaysOn[0] | alwaysOff[0];
			mask[1] = alwaysOn[1] | alwaysOff[1];
		}
	}
	addr[0] |= NVREG_MCASTADDRA_FORCE;
	pff |= NVREG_PFF_ALWAYS;
	spin_lock_irq(&np->lock);
	nv_stop_rx(dev);
	writel(addr[0], base + NvRegMulticastAddrA);
	writel(addr[1], base + NvRegMulticastAddrB);
	writel(mask[0], base + NvRegMulticastMaskA);
	writel(mask[1], base + NvRegMulticastMaskB);
	writel(pff, base + NvRegPacketFilterFlags);
	dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n",
		dev->name);
	nv_start_rx(dev);
	spin_unlock_irq(&np->lock);
}

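/*
 * How the filter masks above fall out: alwaysOn accumulates the bits that
 * are set in every subscribed multicast address, alwaysOff the bits that
 * are clear in every one, so mask = alwaysOn | alwaysOff marks the bit
 * positions on which all addresses agree and addr carries their shared
 * value.  For the two addresses 01:00:5e:00:00:01 and 01:00:5e:00:00:02,
 * for instance, only the low two bits of the last byte disagree, so only
 * those two bits drop out of the mask.  The hardware then matches on the
 * agreed bits, a cheap superset filter that the stack narrows in software.
 */
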
static void nv_update_pause(struct net_device *dev, u32 pause_flags)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);

	if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
		u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
		if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
			writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		} else {
			writel(pff, base + NvRegPacketFilterFlags);
		}
	}
	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
		u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
		if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
			writel(NVREG_TX_PAUSEFRAME_ENABLE, base + NvRegTxPauseFrame);
			writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
		} else {
			writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
			writel(regmisc, base + NvRegMisc1);
		}
	}
}

/**
 * nv_update_linkspeed: Setup the MAC according to the link partner
 * @dev: Network device to be configured
 *
 * The function queries the PHY and checks if there is a link partner.
 * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is
 * set to 10 MBit HD.
 *
 * The function returns 0 if there is no link partner and 1 if there is
 * a good link partner.
 */
static int nv_update_linkspeed(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int adv = 0;
	int lpa = 0;
	int adv_lpa, adv_pause, lpa_pause;
	int newls = np->linkspeed;
	int newdup = np->duplex;
	int mii_status;
	int retval = 0;
	u32 control_1000, status_1000, phyreg, pause_flags, txreg;

	/* BMSR_LSTATUS is latched, read it twice:
	 * we want the current value.
	 */
	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);

	if (!(mii_status & BMSR_LSTATUS)) {
		dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n",
			dev->name);
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
		retval = 0;
		goto set_speed;
	}

	if (np->autoneg == 0) {
		dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n",
			dev->name, np->fixed_mode);
		if (np->fixed_mode & LPA_100FULL) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
			newdup = 1;
		} else if (np->fixed_mode & LPA_100HALF) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
			newdup = 0;
		} else if (np->fixed_mode & LPA_10FULL) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
			newdup = 1;
		} else {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
			newdup = 0;
		}
		retval = 1;
		goto set_speed;
	}
	/* check auto negotiation is complete */
	if (!(mii_status & BMSR_ANEGCOMPLETE)) {
		/* still in autonegotiation - configure nic for 10 MBit HD and wait. */
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
		retval = 0;
		dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name);
		goto set_speed;
	}

	adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
	dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
		dev->name, adv, lpa);

	retval = 1;
	if (np->gigabit == PHY_GIGABIT) {
		control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
		status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);

		if ((control_1000 & ADVERTISE_1000FULL) &&
		    (status_1000 & LPA_1000FULL)) {
			dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n",
				dev->name);
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
			newdup = 1;
			goto set_speed;
		}
	}

	/* FIXME: handle parallel detection properly */
	adv_lpa = lpa & adv;
	if (adv_lpa & LPA_100FULL) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
		newdup = 1;
	} else if (adv_lpa & LPA_100HALF) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
		newdup = 0;
	} else if (adv_lpa & LPA_10FULL) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 1;
	} else if (adv_lpa & LPA_10HALF) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
	} else {
		dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa);
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
	}

set_speed:
	if (np->duplex == newdup && np->linkspeed == newls)
		return retval;

	dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n",
		dev->name, np->linkspeed, np->duplex, newls, newdup);

	np->duplex = newdup;
	np->linkspeed = newls;

	if (np->gigabit == PHY_GIGABIT) {
		phyreg = readl(base + NvRegRandomSeed);
		phyreg &= ~(0x3FF00);
		if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10)
			phyreg |= NVREG_RNDSEED_FORCE3;
		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)
			phyreg |= NVREG_RNDSEED_FORCE2;
		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
			phyreg |= NVREG_RNDSEED_FORCE;
		writel(phyreg, base + NvRegRandomSeed);
	}

	phyreg = readl(base + NvRegPhyInterface);
	phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
	if (np->duplex == 0)
		phyreg |= PHY_HALF;
	if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
		phyreg |= PHY_100;
	else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
		phyreg |= PHY_1000;
	writel(phyreg, base + NvRegPhyInterface);

	if (phyreg & PHY_RGMII) {
		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
			txreg = NVREG_TX_DEFERRAL_RGMII_1000;
		else
			txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
	} else {
		txreg = NVREG_TX_DEFERRAL_DEFAULT;
	}
	writel(txreg, base + NvRegTxDeferral);

	if (np->desc_ver == DESC_VER_1) {
		txreg = NVREG_TX_WM_DESC1_DEFAULT;
	} else {
		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
			txreg = NVREG_TX_WM_DESC2_3_1000;
		else
			txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
	}
	writel(txreg, base + NvRegTxWatermark);

	writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
	       base + NvRegMisc1);
	pci_push(base);
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);

	pause_flags = 0;
	/* setup pause frame */
	if (np->duplex != 0) {
		if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
			adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM);
			lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM);

			switch (adv_pause) {
			case ADVERTISE_PAUSE_CAP:
				if (lpa_pause & LPA_PAUSE_CAP) {
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				}
				break;
			case ADVERTISE_PAUSE_ASYM:
				if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM))
					pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				break;
			case ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM:
				if (lpa_pause & LPA_PAUSE_CAP) {
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				}
				if (lpa_pause == LPA_PAUSE_ASYM)
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
				break;
			}
		} else {
			pause_flags = np->pause_flags;
		}
	}
	nv_update_pause(dev, pause_flags);

	return retval;
}

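/*
 * The pause switch above appears to follow the usual 802.3x resolution
 * rules (the priority table in IEEE 802.3 Annex 28B): symmetric pause on
 * both sides enables rx (plus tx if locally requested); a local asym-only
 * advertisement enables tx only when the partner offers sym+asym; and a
 * local sym+asym advertisement additionally accepts an asym-only partner
 * by enabling rx.
 */
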
static void nv_linkchange(struct net_device *dev)
{
	if (nv_update_linkspeed(dev)) {
		if (!netif_carrier_ok(dev)) {
			netif_carrier_on(dev);
			printk(KERN_INFO "%s: link up.\n", dev->name);
			nv_start_rx(dev);
		}
	} else {
		if (netif_carrier_ok(dev)) {
			netif_carrier_off(dev);
			printk(KERN_INFO "%s: link down.\n", dev->name);
			nv_stop_rx(dev);
		}
	}
}

static void nv_link_irq(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 miistat;

	miistat = readl(base + NvRegMIIStatus);
	writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
	dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);

	if (miistat & (NVREG_MIISTAT_LINKCHANGE))
		nv_linkchange(dev);
	dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name);
}

static irqreturn_t nv_nic_irq(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;

	dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);

	for (i=0; ; i++) {
		if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
			events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
			writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
		} else {
			events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
			writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
		}
		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		spin_lock(&np->lock);
		nv_tx_done(dev);
		spin_unlock(&np->lock);

#ifdef CONFIG_FORCEDETH_NAPI
		if (events & NVREG_IRQ_RX_ALL) {
			netif_rx_schedule(dev);

			/* Disable further receive irqs */
			spin_lock(&np->lock);
			np->irqmask &= ~NVREG_IRQ_RX_ALL;

			if (np->msi_flags & NV_MSI_X_ENABLED)
				writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			spin_unlock(&np->lock);
		}
#else
		if (nv_rx_process(dev, dev->weight)) {
			if (unlikely(nv_alloc_rx(dev))) {
				spin_lock(&np->lock);
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
				spin_unlock(&np->lock);
			}
		}
#endif
		if (unlikely(events & NVREG_IRQ_LINK)) {
			spin_lock(&np->lock);
			nv_link_irq(dev);
			spin_unlock(&np->lock);
		}
		if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
			spin_lock(&np->lock);
			nv_linkchange(dev);
			spin_unlock(&np->lock);
			np->link_timeout = jiffies + LINK_TIMEOUT;
		}
		if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
				dev->name, events);
		}
		if (unlikely(events & (NVREG_IRQ_UNKNOWN))) {
			printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
			       dev->name, events);
		}
		if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) {
			spin_lock(&np->lock);
			/* disable interrupts on the nic */
			if (!(np->msi_flags & NV_MSI_X_ENABLED))
				writel(0, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq = np->irqmask;
				np->recover_error = 1;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock(&np->lock);
			break;
		}
		if (unlikely(i > max_interrupt_work)) {
			spin_lock(&np->lock);
			/* disable interrupts on the nic */
			if (!(np->msi_flags & NV_MSI_X_ENABLED))
				writel(0, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq = np->irqmask;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
			spin_unlock(&np->lock);
			break;
		}

	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);

	return IRQ_RETVAL(i);
}

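/*
 * Note on the rx split above: with CONFIG_FORCEDETH_NAPI the handler only
 * masks rx interrupts and schedules the poll routine, which does the real
 * work and re-enables them (see nv_napi_poll below); without NAPI the
 * handler processes up to dev->weight packets inline and falls back to
 * the oom_kick timer whenever the refill allocation fails.
 */
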
f0734ab6
AA
3081#define TX_WORK_PER_LOOP 64
3082#define RX_WORK_PER_LOOP 64
3083/**
3084 * All _optimized functions are used to help increase performance
3085 * (reduce CPU and increase throughput). They use descripter version 3,
3086 * compiler directives, and reduce memory accesses.
3087 */
86b22b0d
AA
3088static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
3089{
3090 struct net_device *dev = (struct net_device *) data;
3091 struct fe_priv *np = netdev_priv(dev);
3092 u8 __iomem *base = get_hwbase(dev);
3093 u32 events;
3094 int i;
3095
3096 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name);
3097
3098 for (i=0; ; i++) {
3099 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3100 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
3101 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
3102 } else {
3103 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
3104 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
3105 }
86b22b0d
AA
3106 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3107 if (!(events & np->irqmask))
3108 break;
3109
3110 spin_lock(&np->lock);
4e16ed1b 3111 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
86b22b0d
AA
3112 spin_unlock(&np->lock);
3113
f0734ab6
AA
3114#ifdef CONFIG_FORCEDETH_NAPI
3115 if (events & NVREG_IRQ_RX_ALL) {
3116 netif_rx_schedule(dev);
3117
3118 /* Disable furthur receive irq's */
3119 spin_lock(&np->lock);
3120 np->irqmask &= ~NVREG_IRQ_RX_ALL;
3121
3122 if (np->msi_flags & NV_MSI_X_ENABLED)
3123 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3124 else
3125 writel(np->irqmask, base + NvRegIrqMask);
3126 spin_unlock(&np->lock);
3127 }
3128#else
3129 if (nv_rx_process_optimized(dev, dev->weight)) {
3130 if (unlikely(nv_alloc_rx_optimized(dev))) {
3131 spin_lock(&np->lock);
3132 if (!np->in_shutdown)
3133 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3134 spin_unlock(&np->lock);
3135 }
3136 }
3137#endif
3138 if (unlikely(events & NVREG_IRQ_LINK)) {
86b22b0d
AA
3139 spin_lock(&np->lock);
3140 nv_link_irq(dev);
3141 spin_unlock(&np->lock);
3142 }
f0734ab6 3143 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
86b22b0d
AA
3144 spin_lock(&np->lock);
3145 nv_linkchange(dev);
3146 spin_unlock(&np->lock);
3147 np->link_timeout = jiffies + LINK_TIMEOUT;
3148 }
f0734ab6 3149 if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
86b22b0d
AA
3150 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
3151 dev->name, events);
3152 }
f0734ab6 3153 if (unlikely(events & (NVREG_IRQ_UNKNOWN))) {
86b22b0d
AA
3154 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
3155 dev->name, events);
3156 }
3157 if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) {
3158 spin_lock(&np->lock);
3159 /* disable interrupts on the nic */
3160 if (!(np->msi_flags & NV_MSI_X_ENABLED))
3161 writel(0, base + NvRegIrqMask);
3162 else
3163 writel(np->irqmask, base + NvRegIrqMask);
3164 pci_push(base);
3165
3166 if (!np->in_shutdown) {
3167 np->nic_poll_irq = np->irqmask;
3168 np->recover_error = 1;
3169 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3170 }
3171 spin_unlock(&np->lock);
3172 break;
3173 }
3174
f0734ab6 3175 if (unlikely(i > max_interrupt_work)) {
86b22b0d
AA
3176 spin_lock(&np->lock);
3177 /* disable interrupts on the nic */
3178 if (!(np->msi_flags & NV_MSI_X_ENABLED))
3179 writel(0, base + NvRegIrqMask);
3180 else
3181 writel(np->irqmask, base + NvRegIrqMask);
3182 pci_push(base);
3183
3184 if (!np->in_shutdown) {
3185 np->nic_poll_irq = np->irqmask;
3186 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3187 }
3188 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_optimized.\n", dev->name, i);
3189 spin_unlock(&np->lock);
3190 break;
3191 }
3192
3193 }
3194 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);
3195
3196 return IRQ_RETVAL(i);
3197}
3198
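/* MSI-X handler for the dedicated tx vector: only reaps tx completions
 * and reports tx errors; rx and link events have their own vectors. */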
7d12e780 3199static irqreturn_t nv_nic_irq_tx(int foo, void *data)
3200{
3201 struct net_device *dev = (struct net_device *) data;
3202 struct fe_priv *np = netdev_priv(dev);
3203 u8 __iomem *base = get_hwbase(dev);
3204 u32 events;
3205 int i;
0a07bc64 3206 unsigned long flags;
3207
3208 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);
3209
3210 for (i=0; ; i++) {
3211 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
3212 writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
3213 dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
3214 if (!(events & np->irqmask))
3215 break;
3216
0a07bc64 3217 spin_lock_irqsave(&np->lock, flags);
4e16ed1b 3218 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
0a07bc64 3219 spin_unlock_irqrestore(&np->lock, flags);
f3b197ac 3220
f0734ab6 3221 if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
3222 dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
3223 dev->name, events);
3224 }
f0734ab6 3225 if (unlikely(i > max_interrupt_work)) {
0a07bc64 3226 spin_lock_irqsave(&np->lock, flags);
3227 /* disable interrupts on the nic */
3228 writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
3229 pci_push(base);
3230
3231 if (!np->in_shutdown) {
3232 np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
3233 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3234 }
3235 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
0a07bc64 3236 spin_unlock_irqrestore(&np->lock, flags);
3237 break;
3238 }
3239
3240 }
3241 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name);
3242
3243 return IRQ_RETVAL(i);
3244}
3245
3246#ifdef CONFIG_FORCEDETH_NAPI
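/* old-style NAPI poll: process at most min(*budget, dev->quota) rx
 * packets; return 0 (and re-enable rx interrupts) once the ring is
 * drained, or return 1 to remain scheduled. */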
3247static int nv_napi_poll(struct net_device *dev, int *budget)
3248{
3249 int pkts, limit = min(*budget, dev->quota);
3250 struct fe_priv *np = netdev_priv(dev);
3251 u8 __iomem *base = get_hwbase(dev);
d15e9c4d 3252 unsigned long flags;
e0379a14 3253 int retcode;
e27cdba5 3254
e0379a14 3255 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
86b22b0d 3256 pkts = nv_rx_process(dev, limit);
3257 retcode = nv_alloc_rx(dev);
3258 } else {
86b22b0d 3259 pkts = nv_rx_process_optimized(dev, limit);
3260 retcode = nv_alloc_rx_optimized(dev);
3261 }
e27cdba5 3262
e0379a14 3263 if (retcode) {
d15e9c4d 3264 spin_lock_irqsave(&np->lock, flags);
3265 if (!np->in_shutdown)
3266 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
d15e9c4d 3267 spin_unlock_irqrestore(&np->lock, flags);
3268 }
3269
3270 if (pkts < limit) {
3271 /* all done, no more packets present */
3272 netif_rx_complete(dev);
3273
3274 /* re-enable receive interrupts */
3275 spin_lock_irqsave(&np->lock, flags);
3276
3277 np->irqmask |= NVREG_IRQ_RX_ALL;
3278 if (np->msi_flags & NV_MSI_X_ENABLED)
3279 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3280 else
3281 writel(np->irqmask, base + NvRegIrqMask);
3282
3283 spin_unlock_irqrestore(&np->lock, flags);
3284 return 0;
3285 } else {
3286 /* used up our quantum, so reschedule */
3287 dev->quota -= pkts;
3288 *budget -= pkts;
3289 return 1;
3290 }
3291}
3292#endif
3293
3294#ifdef CONFIG_FORCEDETH_NAPI
7d12e780 3295static irqreturn_t nv_nic_irq_rx(int foo, void *data)
3296{
3297 struct net_device *dev = (struct net_device *) data;
3298 u8 __iomem *base = get_hwbase(dev);
3299 u32 events;
3300
3301 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
3302 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
3303
3304 if (events) {
3305 netif_rx_schedule(dev);
3306 /* disable receive interrupts on the nic */
3307 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3308 pci_push(base);
3309 }
3310 return IRQ_HANDLED;
3311}
3312#else
7d12e780 3313static irqreturn_t nv_nic_irq_rx(int foo, void *data)
3314{
3315 struct net_device *dev = (struct net_device *) data;
3316 struct fe_priv *np = netdev_priv(dev);
3317 u8 __iomem *base = get_hwbase(dev);
3318 u32 events;
3319 int i;
0a07bc64 3320 unsigned long flags;
3321
3322 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);
3323
3324 for (i=0; ; i++) {
3325 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
3326 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
3327 dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
3328 if (!(events & np->irqmask))
3329 break;
f3b197ac 3330
3331 if (nv_rx_process_optimized(dev, dev->weight)) {
3332 if (unlikely(nv_alloc_rx_optimized(dev))) {
3333 spin_lock_irqsave(&np->lock, flags);
3334 if (!np->in_shutdown)
3335 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3336 spin_unlock_irqrestore(&np->lock, flags);
3337 }
d33a73c8 3338 }
f3b197ac 3339
f0734ab6 3340 if (unlikely(i > max_interrupt_work)) {
0a07bc64 3341 spin_lock_irqsave(&np->lock, flags);
3342 /* disable interrupts on the nic */
3343 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3344 pci_push(base);
3345
3346 if (!np->in_shutdown) {
3347 np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
3348 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3349 }
3350 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
0a07bc64 3351 spin_unlock_irqrestore(&np->lock, flags);
3352 break;
3353 }
3354 }
3355 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);
3356
3357 return IRQ_RETVAL(i);
3358}
e27cdba5 3359#endif
d33a73c8 3360
7d12e780 3361static irqreturn_t nv_nic_irq_other(int foo, void *data)
3362{
3363 struct net_device *dev = (struct net_device *) data;
3364 struct fe_priv *np = netdev_priv(dev);
3365 u8 __iomem *base = get_hwbase(dev);
3366 u32 events;
3367 int i;
0a07bc64 3368 unsigned long flags;
3369
3370 dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);
3371
3372 for (i=0; ; i++) {
3373 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
3374 writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
3375 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3376 if (!(events & np->irqmask))
3377 break;
f3b197ac 3378
3379 /* check tx in case we reached max loop limit in tx isr */
3380 spin_lock_irqsave(&np->lock, flags);
3381 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3382 spin_unlock_irqrestore(&np->lock, flags);
3383
d33a73c8 3384 if (events & NVREG_IRQ_LINK) {
0a07bc64 3385 spin_lock_irqsave(&np->lock, flags);
d33a73c8 3386 nv_link_irq(dev);
0a07bc64 3387 spin_unlock_irqrestore(&np->lock, flags);
3388 }
3389 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
0a07bc64 3390 spin_lock_irqsave(&np->lock, flags);
d33a73c8 3391 nv_linkchange(dev);
0a07bc64 3392 spin_unlock_irqrestore(&np->lock, flags);
3393 np->link_timeout = jiffies + LINK_TIMEOUT;
3394 }
3395 if (events & NVREG_IRQ_RECOVER_ERROR) {
3396 spin_lock_irq(&np->lock);
3397 /* disable interrupts on the nic */
3398 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3399 pci_push(base);
3400
3401 if (!np->in_shutdown) {
3402 np->nic_poll_irq |= NVREG_IRQ_OTHER;
3403 np->recover_error = 1;
3404 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3405 }
3406 spin_unlock_irq(&np->lock);
3407 break;
3408 }
3409 if (events & (NVREG_IRQ_UNKNOWN)) {
3410 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
3411 dev->name, events);
3412 }
f0734ab6 3413 if (unlikely(i > max_interrupt_work)) {
0a07bc64 3414 spin_lock_irqsave(&np->lock, flags);
3415 /* disable interrupts on the nic */
3416 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3417 pci_push(base);
3418
3419 if (!np->in_shutdown) {
3420 np->nic_poll_irq |= NVREG_IRQ_OTHER;
3421 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3422 }
3423 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
0a07bc64 3424 spin_unlock_irqrestore(&np->lock, flags);
3425 break;
3426 }
3427
3428 }
3429 dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name);
3430
3431 return IRQ_RETVAL(i);
3432}
3433
7d12e780 3434static irqreturn_t nv_nic_irq_test(int foo, void *data)
3435{
3436 struct net_device *dev = (struct net_device *) data;
3437 struct fe_priv *np = netdev_priv(dev);
3438 u8 __iomem *base = get_hwbase(dev);
3439 u32 events;
3440
3441 dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name);
3442
3443 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3444 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
3445 writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);
3446 } else {
3447 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
3448 writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
3449 }
3450 pci_push(base);
3451 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3452 if (!(events & NVREG_IRQ_TIMER))
3453 return IRQ_RETVAL(0);
3454
3455 spin_lock(&np->lock);
3456 np->intr_test = 1;
3457 spin_unlock(&np->lock);
3458
3459 dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name);
3460
3461 return IRQ_RETVAL(1);
3462}
3463
3464static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
3465{
3466 u8 __iomem *base = get_hwbase(dev);
3467 int i;
3468 u32 msixmap = 0;
3469
3470 /* Each interrupt bit can be mapped to a MSIX vector (4 bits).
3471 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
3472 * the remaining 8 interrupts.
3473 */
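	/* e.g. mapping vector 1 over irqmask 0x000f yields msixmap 0x1111 */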
3474 for (i = 0; i < 8; i++) {
3475 if ((irqmask >> i) & 0x1) {
3476 msixmap |= vector << (i << 2);
3477 }
3478 }
3479 writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
3480
3481 msixmap = 0;
3482 for (i = 0; i < 8; i++) {
3483 if ((irqmask >> (i + 8)) & 0x1) {
3484 msixmap |= vector << (i << 2);
3485 }
3486 }
3487 writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
3488}
3489
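/* Set up interrupt delivery in decreasing order of preference: MSI-X
 * with separate rx/tx/other vectors (throughput mode), MSI-X with one
 * shared vector, MSI, and finally the legacy pin-based irq. */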
9589c77a 3490static int nv_request_irq(struct net_device *dev, int intr_test)
3491{
3492 struct fe_priv *np = get_nvpriv(dev);
3493 u8 __iomem *base = get_hwbase(dev);
3494 int ret = 1;
3495 int i;
3496 irqreturn_t (*handler)(int foo, void *data);
3497
3498 if (intr_test) {
3499 handler = nv_nic_irq_test;
3500 } else {
3501 if (np->desc_ver == DESC_VER_3)
3502 handler = nv_nic_irq_optimized;
3503 else
3504 handler = nv_nic_irq;
3505 }
3506
3507 if (np->msi_flags & NV_MSI_X_CAPABLE) {
3508 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
3509 np->msi_x_entry[i].entry = i;
3510 }
3511 if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
3512 np->msi_flags |= NV_MSI_X_ENABLED;
9589c77a 3513 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
7a1854b7 3514 /* Request irq for rx handling */
1fb9df5d 3515 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, IRQF_SHARED, dev->name, dev) != 0) {
3516 printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
3517 pci_disable_msix(np->pci_dev);
3518 np->msi_flags &= ~NV_MSI_X_ENABLED;
3519 goto out_err;
3520 }
3521 /* Request irq for tx handling */
1fb9df5d 3522 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, IRQF_SHARED, dev->name, dev) != 0) {
3523 printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
3524 pci_disable_msix(np->pci_dev);
3525 np->msi_flags &= ~NV_MSI_X_ENABLED;
3526 goto out_free_rx;
3527 }
3528 /* Request irq for link and timer handling */
1fb9df5d 3529 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, IRQF_SHARED, dev->name, dev) != 0) {
3530 printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
3531 pci_disable_msix(np->pci_dev);
3532 np->msi_flags &= ~NV_MSI_X_ENABLED;
3533 goto out_free_tx;
3534 }
3535 /* map interrupts to their respective vector */
3536 writel(0, base + NvRegMSIXMap0);
3537 writel(0, base + NvRegMSIXMap1);
3538 set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
3539 set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
3540 set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
3541 } else {
3542 /* Request irq for all interrupts */
86b22b0d 3543 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) {
3544 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
3545 pci_disable_msix(np->pci_dev);
3546 np->msi_flags &= ~NV_MSI_X_ENABLED;
3547 goto out_err;
3548 }
3549
3550 /* map interrupts to vector 0 */
3551 writel(0, base + NvRegMSIXMap0);
3552 writel(0, base + NvRegMSIXMap1);
3553 }
3554 }
3555 }
3556 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
3557 if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
3558 np->msi_flags |= NV_MSI_ENABLED;
86b22b0d 3559 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
3560 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
3561 pci_disable_msi(np->pci_dev);
3562 np->msi_flags &= ~NV_MSI_ENABLED;
3563 goto out_err;
3564 }
3565
3566 /* map interrupts to vector 0 */
3567 writel(0, base + NvRegMSIMap0);
3568 writel(0, base + NvRegMSIMap1);
3569 /* enable msi vector 0 */
3570 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
3571 }
3572 }
3573 if (ret != 0) {
86b22b0d 3574 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
7a1854b7 3575 goto out_err;
9589c77a 3576
3577 }
3578
3579 return 0;
3580out_free_tx:
3581 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
3582out_free_rx:
3583 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
3584out_err:
3585 return 1;
3586}
3587
3588static void nv_free_irq(struct net_device *dev)
3589{
3590 struct fe_priv *np = get_nvpriv(dev);
3591 int i;
3592
3593 if (np->msi_flags & NV_MSI_X_ENABLED) {
3594 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
3595 free_irq(np->msi_x_entry[i].vector, dev);
3596 }
3597 pci_disable_msix(np->pci_dev);
3598 np->msi_flags &= ~NV_MSI_X_ENABLED;
3599 } else {
3600 free_irq(np->pci_dev->irq, dev);
3601 if (np->msi_flags & NV_MSI_ENABLED) {
3602 pci_disable_msi(np->pci_dev);
3603 np->msi_flags &= ~NV_MSI_ENABLED;
3604 }
3605 }
3606}
3607
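/* Timer callback for deferred irq work: the handlers mask interrupts
 * and arm this timer when they hit max_interrupt_work or detect a
 * recoverable error; it re-runs them and unmasks the interrupts. */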
3608static void nv_do_nic_poll(unsigned long data)
3609{
3610 struct net_device *dev = (struct net_device *) data;
ac9c1897 3611 struct fe_priv *np = netdev_priv(dev);
1da177e4 3612 u8 __iomem *base = get_hwbase(dev);
d33a73c8 3613 u32 mask = 0;
1da177e4 3614
1da177e4 3615 /*
d33a73c8 3616 * First disable irq(s), then reenable interrupts on the nic;
3617 * we have to do this before calling nv_nic_irq because that
3618 * handler may decide to mask them again
3619 */
d33a73c8 3620
3621 if (!using_multi_irqs(dev)) {
3622 if (np->msi_flags & NV_MSI_X_ENABLED)
8688cfce 3623 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
84b3932b 3624 else
8688cfce 3625 disable_irq_lockdep(dev->irq);
3626 mask = np->irqmask;
3627 } else {
3628 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
8688cfce 3629 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
3630 mask |= NVREG_IRQ_RX_ALL;
3631 }
3632 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
8688cfce 3633 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
3634 mask |= NVREG_IRQ_TX_ALL;
3635 }
3636 if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
8688cfce 3637 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
3638 mask |= NVREG_IRQ_OTHER;
3639 }
3640 }
3641 np->nic_poll_irq = 0;
3642
3643 if (np->recover_error) {
3644 np->recover_error = 0;
3645 printk(KERN_INFO "forcedeth: MAC in recoverable error state\n");
3646 if (netif_running(dev)) {
3647 netif_tx_lock_bh(dev);
3648 spin_lock(&np->lock);
3649 /* stop engines */
3650 nv_stop_rx(dev);
3651 nv_stop_tx(dev);
3652 nv_txrx_reset(dev);
3653 /* drain rx queue */
3654 nv_drain_rx(dev);
3655 nv_drain_tx(dev);
3656 /* reinit driver view of the rx queue */
3657 set_bufsize(dev);
3658 if (nv_init_ring(dev)) {
3659 if (!np->in_shutdown)
3660 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3661 }
3662 /* reinit nic view of the rx queue */
3663 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
3664 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
3665 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
3666 base + NvRegRingSizes);
3667 pci_push(base);
3668 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
3669 pci_push(base);
3670
3671 /* restart rx engine */
3672 nv_start_rx(dev);
3673 nv_start_tx(dev);
3674 spin_unlock(&np->lock);
3675 netif_tx_unlock_bh(dev);
3676 }
3677 }
3678
d33a73c8 3679 /* FIXME: Do we need synchronize_irq(dev->irq) here? */
f3b197ac 3680
d33a73c8 3681 writel(mask, base + NvRegIrqMask);
1da177e4 3682 pci_push(base);
d33a73c8 3683
84b3932b 3684 if (!using_multi_irqs(dev)) {
3685 if (np->desc_ver == DESC_VER_3)
3686 nv_nic_irq_optimized(0, dev);
3687 else
3688 nv_nic_irq(0, dev);
84b3932b 3689 if (np->msi_flags & NV_MSI_X_ENABLED)
8688cfce 3690 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
84b3932b 3691 else
8688cfce 3692 enable_irq_lockdep(dev->irq);
3693 } else {
3694 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
7d12e780 3695 nv_nic_irq_rx(0, dev);
8688cfce 3696 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
3697 }
3698 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
7d12e780 3699 nv_nic_irq_tx(0, dev);
8688cfce 3700 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
3701 }
3702 if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
7d12e780 3703 nv_nic_irq_other(0, dev);
8688cfce 3704 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
3705 }
3706 }
3707}
3708
3709#ifdef CONFIG_NET_POLL_CONTROLLER
3710static void nv_poll_controller(struct net_device *dev)
3711{
3712 nv_do_nic_poll((unsigned long) dev);
3713}
3714#endif
3715
3716static void nv_do_stats_poll(unsigned long data)
3717{
3718 struct net_device *dev = (struct net_device *) data;
3719 struct fe_priv *np = netdev_priv(dev);
52da3578 3720
57fff698 3721 nv_get_hw_stats(dev);
3722
3723 if (!np->in_shutdown)
3724 mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
3725}
3726
3727static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3728{
ac9c1897 3729 struct fe_priv *np = netdev_priv(dev);
3730 strcpy(info->driver, "forcedeth");
3731 strcpy(info->version, FORCEDETH_VERSION);
3732 strcpy(info->bus_info, pci_name(np->pci_dev));
3733}
3734
3735static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
3736{
ac9c1897 3737 struct fe_priv *np = netdev_priv(dev);
3738 wolinfo->supported = WAKE_MAGIC;
3739
3740 spin_lock_irq(&np->lock);
3741 if (np->wolenabled)
3742 wolinfo->wolopts = WAKE_MAGIC;
3743 spin_unlock_irq(&np->lock);
3744}
3745
3746static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
3747{
ac9c1897 3748 struct fe_priv *np = netdev_priv(dev);
1da177e4 3749 u8 __iomem *base = get_hwbase(dev);
c42d9df9 3750 u32 flags = 0;
1da177e4 3751
1da177e4 3752 if (wolinfo->wolopts == 0) {
1da177e4 3753 np->wolenabled = 0;
c42d9df9 3754 } else if (wolinfo->wolopts & WAKE_MAGIC) {
1da177e4 3755 np->wolenabled = 1;
3756 flags = NVREG_WAKEUPFLAGS_ENABLE;
3757 }
3758 if (netif_running(dev)) {
3759 spin_lock_irq(&np->lock);
3760 writel(flags, base + NvRegWakeUpFlags);
3761 spin_unlock_irq(&np->lock);
1da177e4 3762 }
3763 return 0;
3764}
3765
3766static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
3767{
3768 struct fe_priv *np = netdev_priv(dev);
3769 int adv;
3770
3771 spin_lock_irq(&np->lock);
3772 ecmd->port = PORT_MII;
3773 if (!netif_running(dev)) {
3774 /* We do not track link speed / duplex setting if the
3775 * interface is disabled. Force a link check */
3776 if (nv_update_linkspeed(dev)) {
3777 if (!netif_carrier_ok(dev))
3778 netif_carrier_on(dev);
3779 } else {
3780 if (netif_carrier_ok(dev))
3781 netif_carrier_off(dev);
3782 }
1da177e4 3783 }
3784
3785 if (netif_carrier_ok(dev)) {
3786 switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) {
3787 case NVREG_LINKSPEED_10:
3788 ecmd->speed = SPEED_10;
3789 break;
3790 case NVREG_LINKSPEED_100:
3791 ecmd->speed = SPEED_100;
3792 break;
3793 case NVREG_LINKSPEED_1000:
3794 ecmd->speed = SPEED_1000;
3795 break;
3796 }
3797 ecmd->duplex = DUPLEX_HALF;
3798 if (np->duplex)
3799 ecmd->duplex = DUPLEX_FULL;
3800 } else {
3801 ecmd->speed = -1;
3802 ecmd->duplex = -1;
1da177e4 3803 }
3804
3805 ecmd->autoneg = np->autoneg;
3806
3807 ecmd->advertising = ADVERTISED_MII;
3808 if (np->autoneg) {
3809 ecmd->advertising |= ADVERTISED_Autoneg;
3810 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
3811 if (adv & ADVERTISE_10HALF)
3812 ecmd->advertising |= ADVERTISED_10baseT_Half;
3813 if (adv & ADVERTISE_10FULL)
3814 ecmd->advertising |= ADVERTISED_10baseT_Full;
3815 if (adv & ADVERTISE_100HALF)
3816 ecmd->advertising |= ADVERTISED_100baseT_Half;
3817 if (adv & ADVERTISE_100FULL)
3818 ecmd->advertising |= ADVERTISED_100baseT_Full;
3819 if (np->gigabit == PHY_GIGABIT) {
3820 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
3821 if (adv & ADVERTISE_1000FULL)
3822 ecmd->advertising |= ADVERTISED_1000baseT_Full;
3823 }
1da177e4 3824 }
3825 ecmd->supported = (SUPPORTED_Autoneg |
3826 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
3827 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
3828 SUPPORTED_MII);
3829 if (np->gigabit == PHY_GIGABIT)
3830 ecmd->supported |= SUPPORTED_1000baseT_Full;
3831
3832 ecmd->phy_address = np->phyaddr;
3833 ecmd->transceiver = XCVR_EXTERNAL;
3834
3835 /* ignore maxtxpkt, maxrxpkt for now */
3836 spin_unlock_irq(&np->lock);
3837 return 0;
3838}
3839
3840static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
3841{
3842 struct fe_priv *np = netdev_priv(dev);
3843
3844 if (ecmd->port != PORT_MII)
3845 return -EINVAL;
3846 if (ecmd->transceiver != XCVR_EXTERNAL)
3847 return -EINVAL;
3848 if (ecmd->phy_address != np->phyaddr) {
3849 /* TODO: support switching between multiple phys. Should be
3850 * trivial, but not enabled due to lack of test hardware. */
3851 return -EINVAL;
3852 }
3853 if (ecmd->autoneg == AUTONEG_ENABLE) {
3854 u32 mask;
3855
3856 mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
3857 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
3858 if (np->gigabit == PHY_GIGABIT)
3859 mask |= ADVERTISED_1000baseT_Full;
3860
3861 if ((ecmd->advertising & mask) == 0)
3862 return -EINVAL;
3863
3864 } else if (ecmd->autoneg == AUTONEG_DISABLE) {
3865 /* Note: autonegotiation disabled, speed 1000 intentionally
3866 * forbidden - no one should need that. */
3867
3868 if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
3869 return -EINVAL;
3870 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
3871 return -EINVAL;
3872 } else {
3873 return -EINVAL;
3874 }
3875
3876 netif_carrier_off(dev);
3877 if (netif_running(dev)) {
3878 nv_disable_irq(dev);
58dfd9c1 3879 netif_tx_lock_bh(dev);
3880 spin_lock(&np->lock);
3881 /* stop engines */
3882 nv_stop_rx(dev);
3883 nv_stop_tx(dev);
3884 spin_unlock(&np->lock);
58dfd9c1 3885 netif_tx_unlock_bh(dev);
3886 }
3887
3888 if (ecmd->autoneg == AUTONEG_ENABLE) {
3889 int adv, bmcr;
3890
3891 np->autoneg = 1;
3892
3893 /* advertise only what has been requested */
3894 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
eb91f61b 3895 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3896 if (ecmd->advertising & ADVERTISED_10baseT_Half)
3897 adv |= ADVERTISE_10HALF;
3898 if (ecmd->advertising & ADVERTISED_10baseT_Full)
b6d0773f 3899 adv |= ADVERTISE_10FULL;
3900 if (ecmd->advertising & ADVERTISED_100baseT_Half)
3901 adv |= ADVERTISE_100HALF;
3902 if (ecmd->advertising & ADVERTISED_100baseT_Full)
3903 adv |= ADVERTISE_100FULL;
3904 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
3905 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
3906 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3907 adv |= ADVERTISE_PAUSE_ASYM;
3908 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
3909
3910 if (np->gigabit == PHY_GIGABIT) {
eb91f61b 3911 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
3912 adv &= ~ADVERTISE_1000FULL;
3913 if (ecmd->advertising & ADVERTISED_1000baseT_Full)
3914 adv |= ADVERTISE_1000FULL;
eb91f61b 3915 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
3916 }
3917
3918 if (netif_running(dev))
3919 printk(KERN_INFO "%s: link down.\n", dev->name);
1da177e4 3920 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
3921 if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
3922 bmcr |= BMCR_ANENABLE;
3923 /* reset the phy in order for settings to stick,
3924 * and cause autoneg to start */
3925 if (phy_reset(dev, bmcr)) {
3926 printk(KERN_INFO "%s: phy reset failed\n", dev->name);
3927 return -EINVAL;
3928 }
3929 } else {
3930 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
3931 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
3932 }
3933 } else {
3934 int adv, bmcr;
3935
3936 np->autoneg = 0;
3937
3938 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
eb91f61b 3939 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3940 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
3941 adv |= ADVERTISE_10HALF;
3942 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
b6d0773f 3943 adv |= ADVERTISE_10FULL;
3944 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
3945 adv |= ADVERTISE_100HALF;
3946 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
3947 adv |= ADVERTISE_100FULL;
3948 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
3949 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) { /* for rx we set both advertisements but disable tx pause */
3950 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
3951 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3952 }
3953 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
3954 adv |= ADVERTISE_PAUSE_ASYM;
3955 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3956 }
3957 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
3958 np->fixed_mode = adv;
3959
3960 if (np->gigabit == PHY_GIGABIT) {
eb91f61b 3961 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
1da177e4 3962 adv &= ~ADVERTISE_1000FULL;
eb91f61b 3963 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
3964 }
3965
3966 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
3967 bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
3968 if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
1da177e4 3969 bmcr |= BMCR_FULLDPLX;
f9430a01 3970 if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
1da177e4 3971 bmcr |= BMCR_SPEED100;
f9430a01 3972 if (np->phy_oui == PHY_OUI_MARVELL) {
3973 /* reset the phy in order for forced mode settings to stick */
3974 if (phy_reset(dev, bmcr)) {
3975 printk(KERN_INFO "%s: phy reset failed\n", dev->name);
3976 return -EINVAL;
3977 }
3978 } else {
3979 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
3980 if (netif_running(dev)) {
3981 /* Wait a bit and then reconfigure the nic. */
3982 udelay(10);
3983 nv_linkchange(dev);
3984 }
3985 }
3986 }
3987
3988 if (netif_running(dev)) {
3989 nv_start_rx(dev);
3990 nv_start_tx(dev);
3991 nv_enable_irq(dev);
3992 }
3993
3994 return 0;
3995}
3996
dc8216c1 3997#define FORCEDETH_REGS_VER 1
3998
3999static int nv_get_regs_len(struct net_device *dev)
4000{
4001 struct fe_priv *np = netdev_priv(dev);
4002 return np->register_size;
4003}
4004
4005static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
4006{
ac9c1897 4007 struct fe_priv *np = netdev_priv(dev);
4008 u8 __iomem *base = get_hwbase(dev);
4009 u32 *rbuf = buf;
4010 int i;
4011
4012 regs->version = FORCEDETH_REGS_VER;
4013 spin_lock_irq(&np->lock);
86a0f043 4014 for (i = 0; i < np->register_size/sizeof(u32); i++)
4015 rbuf[i] = readl(base + i*sizeof(u32));
4016 spin_unlock_irq(&np->lock);
4017}
4018
4019static int nv_nway_reset(struct net_device *dev)
4020{
ac9c1897 4021 struct fe_priv *np = netdev_priv(dev);
4022 int ret;
4023
4024 if (np->autoneg) {
4025 int bmcr;
4026
4027 netif_carrier_off(dev);
4028 if (netif_running(dev)) {
4029 nv_disable_irq(dev);
58dfd9c1 4030 netif_tx_lock_bh(dev);
4031 spin_lock(&np->lock);
4032 /* stop engines */
4033 nv_stop_rx(dev);
4034 nv_stop_tx(dev);
4035 spin_unlock(&np->lock);
58dfd9c1 4036 netif_tx_unlock_bh(dev);
4037 printk(KERN_INFO "%s: link down.\n", dev->name);
4038 }
4039
dc8216c1 4040 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4041 if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
4042 bmcr |= BMCR_ANENABLE;
4043 /* reset the phy in order for settings to stick */
4044 if (phy_reset(dev, bmcr)) {
4045 printk(KERN_INFO "%s: phy reset failed\n", dev->name);
4046 return -EINVAL;
4047 }
4048 } else {
4049 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4050 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4051 }
dc8216c1 4052
4053 if (netif_running(dev)) {
4054 nv_start_rx(dev);
4055 nv_start_tx(dev);
4056 nv_enable_irq(dev);
4057 }
4058 ret = 0;
4059 } else {
4060 ret = -EINVAL;
4061 }
4062
4063 return ret;
4064}
4065
4066static int nv_set_tso(struct net_device *dev, u32 value)
4067{
4068 struct fe_priv *np = netdev_priv(dev);
4069
4070 if ((np->driver_data & DEV_HAS_CHECKSUM))
4071 return ethtool_op_set_tso(dev, value);
4072 else
6a78814f 4073 return -EOPNOTSUPP;
0674d594 4074}
0674d594 4075
4076static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
4077{
4078 struct fe_priv *np = netdev_priv(dev);
4079
4080 ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
4081 ring->rx_mini_max_pending = 0;
4082 ring->rx_jumbo_max_pending = 0;
4083 ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
4084
4085 ring->rx_pending = np->rx_ring_size;
4086 ring->rx_mini_pending = 0;
4087 ring->rx_jumbo_pending = 0;
4088 ring->tx_pending = np->tx_ring_size;
4089}
4090
4091static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
4092{
4093 struct fe_priv *np = netdev_priv(dev);
4094 u8 __iomem *base = get_hwbase(dev);
761fcd9e 4095 u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
4096 dma_addr_t ring_addr;
4097
4098 if (ring->rx_pending < RX_RING_MIN ||
4099 ring->tx_pending < TX_RING_MIN ||
4100 ring->rx_mini_pending != 0 ||
4101 ring->rx_jumbo_pending != 0 ||
4102 (np->desc_ver == DESC_VER_1 &&
4103 (ring->rx_pending > RING_MAX_DESC_VER_1 ||
4104 ring->tx_pending > RING_MAX_DESC_VER_1)) ||
4105 (np->desc_ver != DESC_VER_1 &&
4106 (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
4107 ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
4108 return -EINVAL;
4109 }
4110
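	/* allocate the replacement rings and skb maps before touching the
	 * running device, so a failed allocation leaves the old setup intact */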
4111 /* allocate new rings */
4112 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
4113 rxtx_ring = pci_alloc_consistent(np->pci_dev,
4114 sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
4115 &ring_addr);
4116 } else {
4117 rxtx_ring = pci_alloc_consistent(np->pci_dev,
4118 sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
4119 &ring_addr);
4120 }
4121 rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL);
4122 tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
4123 if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
4124 /* fall back to old rings */
4125 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
f82a9352 4126 if (rxtx_ring)
4127 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
4128 rxtx_ring, ring_addr);
4129 } else {
4130 if (rxtx_ring)
4131 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
4132 rxtx_ring, ring_addr);
4133 }
4134 if (rx_skbuff)
4135 kfree(rx_skbuff);
4136 if (tx_skbuff)
4137 kfree(tx_skbuff);
4138 goto exit;
4139 }
4140
4141 if (netif_running(dev)) {
4142 nv_disable_irq(dev);
58dfd9c1 4143 netif_tx_lock_bh(dev);
4144 spin_lock(&np->lock);
4145 /* stop engines */
4146 nv_stop_rx(dev);
4147 nv_stop_tx(dev);
4148 nv_txrx_reset(dev);
4149 /* drain queues */
4150 nv_drain_rx(dev);
4151 nv_drain_tx(dev);
4152 /* delete queues */
4153 free_rings(dev);
4154 }
4155
4156 /* set new values */
4157 np->rx_ring_size = ring->rx_pending;
4158 np->tx_ring_size = ring->tx_pending;
4159 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
4160 np->rx_ring.orig = (struct ring_desc*)rxtx_ring;
4161 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
4162 } else {
4163 np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring;
4164 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
4165 }
4166 np->rx_skb = (struct nv_skb_map*)rx_skbuff;
4167 np->tx_skb = (struct nv_skb_map*)tx_skbuff;
4168 np->ring_addr = ring_addr;
4169
4170 memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
4171 memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);
4172
4173 if (netif_running(dev)) {
4174 /* reinit driver view of the queues */
4175 set_bufsize(dev);
4176 if (nv_init_ring(dev)) {
4177 if (!np->in_shutdown)
4178 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4179 }
4180
4181 /* reinit nic view of the queues */
4182 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4183 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4184 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4185 base + NvRegRingSizes);
4186 pci_push(base);
4187 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4188 pci_push(base);
4189
4190 /* restart engines */
4191 nv_start_rx(dev);
4192 nv_start_tx(dev);
4193 spin_unlock(&np->lock);
58dfd9c1 4194 netif_tx_unlock_bh(dev);
4195 nv_enable_irq(dev);
4196 }
4197 return 0;
4198exit:
4199 return -ENOMEM;
4200}
4201
4202static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
4203{
4204 struct fe_priv *np = netdev_priv(dev);
4205
4206 pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
4207 pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
4208 pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
4209}
4210
4211static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
4212{
4213 struct fe_priv *np = netdev_priv(dev);
4214 int adv, bmcr;
4215
4216 if ((!np->autoneg && np->duplex == 0) ||
4217 (np->autoneg && !pause->autoneg && np->duplex == 0)) {
4218 printk(KERN_INFO "%s: cannot set pause settings when forced link is in half duplex.\n",
4219 dev->name);
4220 return -EINVAL;
4221 }
4222 if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
4223 printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name);
4224 return -EINVAL;
4225 }
4226
4227 netif_carrier_off(dev);
4228 if (netif_running(dev)) {
4229 nv_disable_irq(dev);
58dfd9c1 4230 netif_tx_lock_bh(dev);
4231 spin_lock(&np->lock);
4232 /* stop engines */
4233 nv_stop_rx(dev);
4234 nv_stop_tx(dev);
4235 spin_unlock(&np->lock);
58dfd9c1 4236 netif_tx_unlock_bh(dev);
4237 }
4238
4239 np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
4240 if (pause->rx_pause)
4241 np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
4242 if (pause->tx_pause)
4243 np->pause_flags |= NV_PAUSEFRAME_TX_REQ;
4244
4245 if (np->autoneg && pause->autoneg) {
4246 np->pause_flags |= NV_PAUSEFRAME_AUTONEG;
4247
4248 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4249 adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4250 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
4251 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4252 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
4253 adv |= ADVERTISE_PAUSE_ASYM;
4254 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4255
4256 if (netif_running(dev))
4257 printk(KERN_INFO "%s: link down.\n", dev->name);
4258 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4259 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4260 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4261 } else {
4262 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
4263 if (pause->rx_pause)
4264 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
4265 if (pause->tx_pause)
4266 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
4267
4268 if (!netif_running(dev))
4269 nv_update_linkspeed(dev);
4270 else
4271 nv_update_pause(dev, np->pause_flags);
4272 }
4273
4274 if (netif_running(dev)) {
4275 nv_start_rx(dev);
4276 nv_start_tx(dev);
4277 nv_enable_irq(dev);
4278 }
4279 return 0;
4280}
4281
4282static u32 nv_get_rx_csum(struct net_device *dev)
4283{
4284 struct fe_priv *np = netdev_priv(dev);
f2ad2d9b 4285 return (np->rx_csum) != 0;
4286}
4287
4288static int nv_set_rx_csum(struct net_device *dev, u32 data)
4289{
4290 struct fe_priv *np = netdev_priv(dev);
4291 u8 __iomem *base = get_hwbase(dev);
4292 int retcode = 0;
4293
4294 if (np->driver_data & DEV_HAS_CHECKSUM) {
5ed2616f 4295 if (data) {
f2ad2d9b 4296 np->rx_csum = 1;
5ed2616f 4297 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
5ed2616f 4298 } else {
4299 np->rx_csum = 0;
4300 /* vlan is dependent on rx checksum offload */
4301 if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE))
4302 np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
5ed2616f 4303 }
4304 if (netif_running(dev)) {
4305 spin_lock_irq(&np->lock);
4306 writel(np->txrxctl_bits, base + NvRegTxRxControl);
4307 spin_unlock_irq(&np->lock);
4308 }
4309 } else {
4310 return -EINVAL;
4311 }
4312
4313 return retcode;
4314}
4315
4316static int nv_set_tx_csum(struct net_device *dev, u32 data)
4317{
4318 struct fe_priv *np = netdev_priv(dev);
4319
4320 if (np->driver_data & DEV_HAS_CHECKSUM)
4321 return ethtool_op_set_tx_hw_csum(dev, data);
4322 else
4323 return -EOPNOTSUPP;
4324}
4325
4326static int nv_set_sg(struct net_device *dev, u32 data)
4327{
4328 struct fe_priv *np = netdev_priv(dev);
4329
4330 if (np->driver_data & DEV_HAS_CHECKSUM)
4331 return ethtool_op_set_sg(dev, data);
4332 else
4333 return -EOPNOTSUPP;
4334}
4335
4336static int nv_get_stats_count(struct net_device *dev)
4337{
4338 struct fe_priv *np = netdev_priv(dev);
4339
4340 if (np->driver_data & DEV_HAS_STATISTICS_V1)
4341 return NV_DEV_STATISTICS_V1_COUNT;
4342 else if (np->driver_data & DEV_HAS_STATISTICS_V2)
4343 return NV_DEV_STATISTICS_V2_COUNT;
4344 else
4345 return 0;
4346}
4347
4348static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
4349{
4350 struct fe_priv *np = netdev_priv(dev);
4351
4352 /* update stats */
4353 nv_do_stats_poll((unsigned long)dev);
4354
4355 memcpy(buffer, &np->estats, nv_get_stats_count(dev)*sizeof(u64));
4356}
4357
4358static int nv_self_test_count(struct net_device *dev)
4359{
4360 struct fe_priv *np = netdev_priv(dev);
4361
4362 if (np->driver_data & DEV_HAS_TEST_EXTENDED)
4363 return NV_TEST_COUNT_EXTENDED;
4364 else
4365 return NV_TEST_COUNT_BASE;
4366}
4367
4368static int nv_link_test(struct net_device *dev)
4369{
4370 struct fe_priv *np = netdev_priv(dev);
4371 int mii_status;
4372
4373 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
4374 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
4375
4376 /* check phy link status */
4377 if (!(mii_status & BMSR_LSTATUS))
4378 return 0;
4379 else
4380 return 1;
4381}
4382
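/* Walk nv_registers_test[], toggle the maskable bits of each register,
 * verify the toggled value reads back, then restore the original. */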
4383static int nv_register_test(struct net_device *dev)
4384{
4385 u8 __iomem *base = get_hwbase(dev);
4386 int i = 0;
4387 u32 orig_read, new_read;
4388
4389 do {
4390 orig_read = readl(base + nv_registers_test[i].reg);
4391
4392 /* xor with mask to toggle bits */
4393 orig_read ^= nv_registers_test[i].mask;
4394
4395 writel(orig_read, base + nv_registers_test[i].reg);
4396
4397 new_read = readl(base + nv_registers_test[i].reg);
4398
4399 if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
4400 return 0;
4401
4402 /* restore original value */
4403 orig_read ^= nv_registers_test[i].mask;
4404 writel(orig_read, base + nv_registers_test[i].reg);
4405
4406 } while (nv_registers_test[++i].reg != 0);
4407
4408 return 1;
4409}
4410
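/* Interrupt self-test: request a single test vector, arm the timer
 * interrupt, and check that nv_nic_irq_test() set np->intr_test. */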
4411static int nv_interrupt_test(struct net_device *dev)
4412{
4413 struct fe_priv *np = netdev_priv(dev);
4414 u8 __iomem *base = get_hwbase(dev);
4415 int ret = 1;
4416 int testcnt;
4417 u32 save_msi_flags, save_poll_interval = 0;
4418
4419 if (netif_running(dev)) {
4420 /* free current irq */
4421 nv_free_irq(dev);
4422 save_poll_interval = readl(base+NvRegPollingInterval);
4423 }
4424
4425 /* flag to test interrupt handler */
4426 np->intr_test = 0;
4427
4428 /* setup test irq */
4429 save_msi_flags = np->msi_flags;
4430 np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
4431 np->msi_flags |= 0x001; /* setup 1 vector */
4432 if (nv_request_irq(dev, 1))
4433 return 0;
4434
4435 /* setup timer interrupt */
4436 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
4437 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
4438
4439 nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);
4440
4441 /* wait for at least one interrupt */
4442 msleep(100);
4443
4444 spin_lock_irq(&np->lock);
4445
4446 /* flag should be set within ISR */
4447 testcnt = np->intr_test;
4448 if (!testcnt)
4449 ret = 2;
4450
4451 nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
4452 if (!(np->msi_flags & NV_MSI_X_ENABLED))
4453 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
4454 else
4455 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
4456
4457 spin_unlock_irq(&np->lock);
4458
4459 nv_free_irq(dev);
4460
4461 np->msi_flags = save_msi_flags;
4462
4463 if (netif_running(dev)) {
4464 writel(save_poll_interval, base + NvRegPollingInterval);
4465 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
4466 /* restore original irq */
4467 if (nv_request_irq(dev, 0))
4468 return 0;
4469 }
4470
4471 return ret;
4472}
4473
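/* Loopback self-test: put the MAC into loopback, transmit one frame
 * filled with a counting byte pattern, and check that the received
 * copy matches in length and payload. */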
4474static int nv_loopback_test(struct net_device *dev)
4475{
4476 struct fe_priv *np = netdev_priv(dev);
4477 u8 __iomem *base = get_hwbase(dev);
4478 struct sk_buff *tx_skb, *rx_skb;
4479 dma_addr_t test_dma_addr;
4480 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
f82a9352 4481 u32 flags;
4482 int len, i, pkt_len;
4483 u8 *pkt_data;
4484 u32 filter_flags = 0;
4485 u32 misc1_flags = 0;
4486 int ret = 1;
4487
4488 if (netif_running(dev)) {
4489 nv_disable_irq(dev);
4490 filter_flags = readl(base + NvRegPacketFilterFlags);
4491 misc1_flags = readl(base + NvRegMisc1);
4492 } else {
4493 nv_txrx_reset(dev);
4494 }
4495
4496 /* reinit driver view of the rx queue */
4497 set_bufsize(dev);
4498 nv_init_ring(dev);
4499
4500 /* setup hardware for loopback */
4501 writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
4502 writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);
4503
4504 /* reinit nic view of the rx queue */
4505 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4506 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4507 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4508 base + NvRegRingSizes);
4509 pci_push(base);
4510
4511 /* restart rx engine */
4512 nv_start_rx(dev);
4513 nv_start_tx(dev);
4514
4515 /* setup packet for tx */
4516 pkt_len = ETH_DATA_LEN;
4517 tx_skb = dev_alloc_skb(pkt_len);
4518 if (!tx_skb) {
4519 printk(KERN_ERR "dev_alloc_skb() failed during loopback test"
4520 " of %s\n", dev->name);
4521 ret = 0;
4522 goto out;
4523 }
4524 test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
4525 skb_tailroom(tx_skb),
4526 PCI_DMA_TODEVICE);
4527 pkt_data = skb_put(tx_skb, pkt_len);
4528 for (i = 0; i < pkt_len; i++)
4529 pkt_data[i] = (u8)(i & 0xff);
4530
4531 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
4532 np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
4533 np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
9589c77a 4534 } else {
4535 np->tx_ring.ex[0].bufhigh = cpu_to_le64(test_dma_addr) >> 32;
4536 np->tx_ring.ex[0].buflow = cpu_to_le64(test_dma_addr) & 0x0FFFFFFFF;
4537 np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
4538 }
4539 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4540 pci_push(get_hwbase(dev));
4541
4542 msleep(500);
4543
4544 /* check for rx of the packet */
4545 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
f82a9352 4546 flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
4547 len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
4548
4549 } else {
f82a9352 4550 flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
4551 len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
4552 }
4553
f82a9352 4554 if (flags & NV_RX_AVAIL) {
4555 ret = 0;
4556 } else if (np->desc_ver == DESC_VER_1) {
f82a9352 4557 if (flags & NV_RX_ERROR)
4558 ret = 0;
4559 } else {
f82a9352 4560 if (flags & NV_RX2_ERROR) {
4561 ret = 0;
4562 }
4563 }
4564
4565 if (ret) {
4566 if (len != pkt_len) {
4567 ret = 0;
4568 dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n",
4569 dev->name, len, pkt_len);
4570 } else {
761fcd9e 4571 rx_skb = np->rx_skb[0].skb;
4572 for (i = 0; i < pkt_len; i++) {
4573 if (rx_skb->data[i] != (u8)(i & 0xff)) {
4574 ret = 0;
4575 dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n",
4576 dev->name, i);
4577 break;
4578 }
4579 }
4580 }
4581 } else {
4582 dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
4583 }
4584
4585 pci_unmap_single(np->pci_dev, test_dma_addr,
4305b541 4586 (skb_end_pointer(tx_skb) - tx_skb->data),
4587 PCI_DMA_TODEVICE);
4588 dev_kfree_skb_any(tx_skb);
46798c89 4589 out:
4590 /* stop engines */
4591 nv_stop_rx(dev);
4592 nv_stop_tx(dev);
4593 nv_txrx_reset(dev);
4594 /* drain rx queue */
4595 nv_drain_rx(dev);
4596 nv_drain_tx(dev);
4597
4598 if (netif_running(dev)) {
4599 writel(misc1_flags, base + NvRegMisc1);
4600 writel(filter_flags, base + NvRegPacketFilterFlags);
4601 nv_enable_irq(dev);
4602 }
4603
4604 return ret;
4605}
4606
4607static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
4608{
4609 struct fe_priv *np = netdev_priv(dev);
4610 u8 __iomem *base = get_hwbase(dev);
4611 int result;
4612 memset(buffer, 0, nv_self_test_count(dev)*sizeof(u64));
4613
4614 if (!nv_link_test(dev)) {
4615 test->flags |= ETH_TEST_FL_FAILED;
4616 buffer[0] = 1;
4617 }
4618
4619 if (test->flags & ETH_TEST_FL_OFFLINE) {
4620 if (netif_running(dev)) {
4621 netif_stop_queue(dev);
e27cdba5 4622 netif_poll_disable(dev);
58dfd9c1 4623 netif_tx_lock_bh(dev);
4624 spin_lock_irq(&np->lock);
4625 nv_disable_hw_interrupts(dev, np->irqmask);
4626 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
4627 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
4628 } else {
4629 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
4630 }
4631 /* stop engines */
4632 nv_stop_rx(dev);
4633 nv_stop_tx(dev);
4634 nv_txrx_reset(dev);
4635 /* drain rx queue */
4636 nv_drain_rx(dev);
4637 nv_drain_tx(dev);
4638 spin_unlock_irq(&np->lock);
58dfd9c1 4639 netif_tx_unlock_bh(dev);
4640 }
4641
4642 if (!nv_register_test(dev)) {
4643 test->flags |= ETH_TEST_FL_FAILED;
4644 buffer[1] = 1;
4645 }
4646
4647 result = nv_interrupt_test(dev);
4648 if (result != 1) {
4649 test->flags |= ETH_TEST_FL_FAILED;
4650 buffer[2] = 1;
4651 }
4652 if (result == 0) {
4653 /* bail out */
4654 return;
4655 }
4656
4657 if (!nv_loopback_test(dev)) {
4658 test->flags |= ETH_TEST_FL_FAILED;
4659 buffer[3] = 1;
4660 }
4661
4662 if (netif_running(dev)) {
4663 /* reinit driver view of the rx queue */
4664 set_bufsize(dev);
4665 if (nv_init_ring(dev)) {
4666 if (!np->in_shutdown)
4667 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4668 }
4669 /* reinit nic view of the rx queue */
4670 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4671 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4672 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4673 base + NvRegRingSizes);
4674 pci_push(base);
4675 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4676 pci_push(base);
4677 /* restart rx engine */
4678 nv_start_rx(dev);
4679 nv_start_tx(dev);
4680 netif_start_queue(dev);
e27cdba5 4681 netif_poll_enable(dev);
4682 nv_enable_hw_interrupts(dev, np->irqmask);
4683 }
4684 }
4685}
4686
4687static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
4688{
4689 switch (stringset) {
4690 case ETH_SS_STATS:
4691 memcpy(buffer, &nv_estats_str, nv_get_stats_count(dev)*sizeof(struct nv_ethtool_str));
4692 break;
4693 case ETH_SS_TEST:
4694 memcpy(buffer, &nv_etests_str, nv_self_test_count(dev)*sizeof(struct nv_ethtool_str));
4695 break;
4696 }
4697}
4698
7282d491 4699static const struct ethtool_ops ops = {
4700 .get_drvinfo = nv_get_drvinfo,
4701 .get_link = ethtool_op_get_link,
4702 .get_wol = nv_get_wol,
4703 .set_wol = nv_set_wol,
4704 .get_settings = nv_get_settings,
4705 .set_settings = nv_set_settings,
4706 .get_regs_len = nv_get_regs_len,
4707 .get_regs = nv_get_regs,
4708 .nway_reset = nv_nway_reset,
c704b856 4709 .get_perm_addr = ethtool_op_get_perm_addr,
0674d594 4710 .get_tso = ethtool_op_get_tso,
6a78814f 4711 .set_tso = nv_set_tso,
4712 .get_ringparam = nv_get_ringparam,
4713 .set_ringparam = nv_set_ringparam,
4714 .get_pauseparam = nv_get_pauseparam,
4715 .set_pauseparam = nv_set_pauseparam,
4716 .get_rx_csum = nv_get_rx_csum,
4717 .set_rx_csum = nv_set_rx_csum,
4718 .get_tx_csum = ethtool_op_get_tx_csum,
4719 .set_tx_csum = nv_set_tx_csum,
4720 .get_sg = ethtool_op_get_sg,
4721 .set_sg = nv_set_sg,
4722 .get_strings = nv_get_strings,
4723 .get_stats_count = nv_get_stats_count,
4724 .get_ethtool_stats = nv_get_ethtool_stats,
4725 .self_test_count = nv_self_test_count,
4726 .self_test = nv_self_test,
4727};
4728
4729static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
4730{
4731 struct fe_priv *np = get_nvpriv(dev);
4732
4733 spin_lock_irq(&np->lock);
4734
4735 /* save vlan group */
4736 np->vlangrp = grp;
4737
4738 if (grp) {
4739 /* enable vlan on MAC */
4740 np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
4741 } else {
4742 /* disable vlan on MAC */
4743 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
4744 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
4745 }
4746
4747 writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4748
4749 spin_unlock_irq(&np->lock);
25805dcf 4750}
ee407b02 4751
4752/* The mgmt unit and driver use a semaphore to access the phy during init */
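/* Poll up to ten times, 500ms apart, for the mgmt unit to release the
 * semaphore, then try to grab the host semaphore and read it back to
 * confirm ownership. */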
4753static int nv_mgmt_acquire_sema(struct net_device *dev)
4754{
4755 u8 __iomem *base = get_hwbase(dev);
4756 int i;
4757 u32 tx_ctrl, mgmt_sema;
4758
4759 for (i = 0; i < 10; i++) {
4760 mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK;
4761 if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE)
4762 break;
4763 msleep(500);
4764 }
4765
4766 if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE)
4767 return 0;
4768
4769 for (i = 0; i < 2; i++) {
4770 tx_ctrl = readl(base + NvRegTransmitterControl);
4771 tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
4772 writel(tx_ctrl, base + NvRegTransmitterControl);
4773
4774 /* verify that semaphore was acquired */
4775 tx_ctrl = readl(base + NvRegTransmitterControl);
4776 if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) &&
4777 ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE))
4778 return 1;
4779 else
4780 udelay(50);
4781 }
4782
4783 return 0;
4784}
4785
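/* Bring the nic up: reset the MAC, init the descriptor rings and the
 * hardware registers, request the irq(s), then start the rx/tx engines
 * and force a first link check. */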
4786static int nv_open(struct net_device *dev)
4787{
ac9c1897 4788 struct fe_priv *np = netdev_priv(dev);
1da177e4 4789 u8 __iomem *base = get_hwbase(dev);
4790 int ret = 1;
4791 int oom, i;
4792
4793 dprintk(KERN_DEBUG "nv_open: begin\n");
4794
f1489653 4795 /* erase previous misconfiguration */
4796 if (np->driver_data & DEV_HAS_POWER_CNTRL)
4797 nv_mac_reset(dev);
4798 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
4799 writel(0, base + NvRegMulticastAddrB);
4800 writel(0, base + NvRegMulticastMaskA);
4801 writel(0, base + NvRegMulticastMaskB);
4802 writel(0, base + NvRegPacketFilterFlags);
4803
4804 writel(0, base + NvRegTransmitterControl);
4805 writel(0, base + NvRegReceiverControl);
4806
4807 writel(0, base + NvRegAdapterControl);
4808
4809 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
4810 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
4811
f1489653 4812 /* initialize descriptor rings */
d81c0983 4813 set_bufsize(dev);
4814 oom = nv_init_ring(dev);
4815
4816 writel(0, base + NvRegLinkSpeed);
5070d340 4817 writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
4818 nv_txrx_reset(dev);
4819 writel(0, base + NvRegUnknownSetupReg6);
4820
4821 np->in_shutdown = 0;
4822
f1489653 4823 /* give hw rings */
0832b25a 4824 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
eafa59f6 4825 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4826 base + NvRegRingSizes);
4827
1da177e4 4828 writel(np->linkspeed, base + NvRegLinkSpeed);
4829 if (np->desc_ver == DESC_VER_1)
4830 writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
4831 else
4832 writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark);
8a4ae7f2 4833 writel(np->txrxctl_bits, base + NvRegTxRxControl);
ee407b02 4834 writel(np->vlanctl_bits, base + NvRegVlanControl);
1da177e4 4835 pci_push(base);
8a4ae7f2 4836 writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
4837 reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
4838 NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
4839 KERN_INFO "open: SetupReg5, Bit 31 remained off\n");
4840
7e680c22 4841 writel(0, base + NvRegMIIMask);
4842 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
4843 writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
4844
4845 writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
4846 writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
4847 writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
d81c0983 4848 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4849
4850 writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
4851 get_random_bytes(&i, sizeof(i));
4852 writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed);
4853 writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral);
4854 writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral);
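	/* Interrupt moderation: in throughput mode every tx/rx packet raises
	 * an interrupt; in CPU mode interrupts are paced by a hardware timer
	 * whose period is programmed below (see the poll_interval module
	 * parameter).
	 */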
	if (poll_interval == -1) {
		if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
			writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
		else
			writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
	} else
		writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
	writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
		base + NvRegAdapterControl);
	writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
	writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask);
	if (np->wolenabled)
		writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);

	i = readl(base + NvRegPowerState);
	if ((i & NVREG_POWERSTATE_POWEREDUP) == 0)
		writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);

	pci_push(base);
	udelay(10);
	writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);

	nv_disable_hw_interrupts(dev, np->irqmask);
	pci_push(base);
	writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	pci_push(base);

	if (nv_request_irq(dev, 0))
		goto out_drain;

	/* ask for interrupts */
	nv_enable_hw_interrupts(dev, np->irqmask);

	spin_lock_irq(&np->lock);
	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
	writel(0, base + NvRegMulticastAddrB);
	writel(0, base + NvRegMulticastMaskA);
	writel(0, base + NvRegMulticastMaskB);
	writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
	/* One manual link speed update: Interrupts are enabled, future link
	 * speed changes cause interrupts and are handled by nv_link_irq().
	 */
	{
		u32 miistat;
		miistat = readl(base + NvRegMIIStatus);
		writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
		dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
	}
	/* set linkspeed to an invalid value, thus forcing nv_update_linkspeed
	 * to init the hw */
	np->linkspeed = 0;
	ret = nv_update_linkspeed(dev);
	nv_start_rx(dev);
	nv_start_tx(dev);
	netif_start_queue(dev);
	netif_poll_enable(dev);

	if (ret) {
		netif_carrier_on(dev);
	} else {
		printk(KERN_INFO "%s: no link during initialization.\n", dev->name);
		netif_carrier_off(dev);
	}
	if (oom)
		mod_timer(&np->oom_kick, jiffies + OOM_REFILL);

	/* start statistics timer */
	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2))
		mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);

	spin_unlock_irq(&np->lock);

	return 0;
out_drain:
	drain_ring(dev);
	return ret;
}

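/*
 * nv_close: bring the interface down. Ordering matters here: mark the
 * device as shutting down, quiesce polling and timers, stop rx/tx under
 * the lock, mask interrupts on the nic, and only then free the irq and
 * drain the rings. If WOL is enabled, rx is restarted so the nic can
 * still match wake-up packets while the interface is down.
 */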
static int nv_close(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base;

	spin_lock_irq(&np->lock);
	np->in_shutdown = 1;
	spin_unlock_irq(&np->lock);
	netif_poll_disable(dev);
	synchronize_irq(dev->irq);

	del_timer_sync(&np->oom_kick);
	del_timer_sync(&np->nic_poll);
	del_timer_sync(&np->stats_poll);

	netif_stop_queue(dev);
	spin_lock_irq(&np->lock);
	nv_stop_tx(dev);
	nv_stop_rx(dev);
	nv_txrx_reset(dev);

	/* disable interrupts on the nic or we will lock up */
	base = get_hwbase(dev);
	nv_disable_hw_interrupts(dev, np->irqmask);
	pci_push(base);
	dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);

	spin_unlock_irq(&np->lock);

	nv_free_irq(dev);

	drain_ring(dev);

	if (np->wolenabled) {
		writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
		nv_start_rx(dev);
	}

	/* FIXME: power down nic */

	return 0;
}

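/*
 * nv_probe: map the register window, choose a descriptor format and DMA
 * mask from the feature flags in driver_data, read (and if necessary
 * byte-swap) the MAC address, defer to the management unit if one is
 * active, find the phy, and register the netdev.
 */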
static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
	struct net_device *dev;
	struct fe_priv *np;
	unsigned long addr;
	u8 __iomem *base;
	int err, i;
	u32 powerstate, txreg;
	u32 phystate_orig = 0, phystate;
	int phyinitialized = 0;

	dev = alloc_etherdev(sizeof(struct fe_priv));
	err = -ENOMEM;
	if (!dev)
		goto out;

	np = netdev_priv(dev);
	np->pci_dev = pci_dev;
	spin_lock_init(&np->lock);
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pci_dev->dev);

	init_timer(&np->oom_kick);
	np->oom_kick.data = (unsigned long) dev;
	np->oom_kick.function = &nv_do_rx_refill;	/* timer handler */
	init_timer(&np->nic_poll);
	np->nic_poll.data = (unsigned long) dev;
	np->nic_poll.function = &nv_do_nic_poll;	/* timer handler */
	init_timer(&np->stats_poll);
	np->stats_poll.data = (unsigned long) dev;
	np->stats_poll.function = &nv_do_stats_poll;	/* timer handler */

	err = pci_enable_device(pci_dev);
	if (err) {
		printk(KERN_INFO "forcedeth: pci_enable_device failed (%d) for device %s\n",
				err, pci_name(pci_dev));
		goto out_free;
	}

	pci_set_master(pci_dev);

	err = pci_request_regions(pci_dev, DRV_NAME);
	if (err < 0)
		goto out_disable;

	if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2))
		np->register_size = NV_PCI_REGSZ_VER3;
	else if (id->driver_data & DEV_HAS_STATISTICS_V1)
		np->register_size = NV_PCI_REGSZ_VER2;
	else
		np->register_size = NV_PCI_REGSZ_VER1;

	err = -EINVAL;
	addr = 0;
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
				pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i),
				pci_resource_len(pci_dev, i),
				pci_resource_flags(pci_dev, i));
		if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
		    pci_resource_len(pci_dev, i) >= np->register_size) {
			addr = pci_resource_start(pci_dev, i);
			break;
		}
	}
	if (i == DEVICE_COUNT_RESOURCE) {
		printk(KERN_INFO "forcedeth: Couldn't find register window for device %s.\n",
		       pci_name(pci_dev));
		goto out_relreg;
	}

	/* copy of driver data */
	np->driver_data = id->driver_data;

	/* handle different descriptor versions */
	if (id->driver_data & DEV_HAS_HIGH_DMA) {
		/* packet format 3: supports 40-bit addressing */
		np->desc_ver = DESC_VER_3;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
		if (dma_64bit) {
			if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) {
				printk(KERN_INFO "forcedeth: 40-bit DMA failed, using 32-bit addressing for device %s.\n",
						pci_name(pci_dev));
			} else {
				dev->features |= NETIF_F_HIGHDMA;
				printk(KERN_INFO "forcedeth: using HIGHDMA\n");
			}
			if (pci_set_consistent_dma_mask(pci_dev, DMA_39BIT_MASK)) {
				printk(KERN_INFO "forcedeth: 40-bit DMA (consistent) failed, using 32-bit ring buffers for device %s.\n",
						pci_name(pci_dev));
			}
		}
	} else if (id->driver_data & DEV_HAS_LARGEDESC) {
		/* packet format 2: supports jumbo frames */
		np->desc_ver = DESC_VER_2;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
	} else {
		/* original packet format */
		np->desc_ver = DESC_VER_1;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
	}

	np->pkt_limit = NV_PKTLIMIT_1;
	if (id->driver_data & DEV_HAS_LARGEDESC)
		np->pkt_limit = NV_PKTLIMIT_2;

	if (id->driver_data & DEV_HAS_CHECKSUM) {
		np->rx_csum = 1;
		np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
		dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
		dev->features |= NETIF_F_TSO;
	}

	np->vlanctl_bits = 0;
	if (id->driver_data & DEV_HAS_VLAN) {
		np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
		dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
		dev->vlan_rx_register = nv_vlan_rx_register;
	}

	np->msi_flags = 0;
	if ((id->driver_data & DEV_HAS_MSI) && msi) {
		np->msi_flags |= NV_MSI_CAPABLE;
	}
	if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
		np->msi_flags |= NV_MSI_X_CAPABLE;
	}

	np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
	if (id->driver_data & DEV_HAS_PAUSEFRAME_TX) {
		np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
	}

	err = -ENOMEM;
	np->base = ioremap(addr, np->register_size);
	if (!np->base)
		goto out_relreg;
	dev->base_addr = (unsigned long)np->base;

	dev->irq = pci_dev->irq;

	np->rx_ring_size = RX_RING_DEFAULT;
	np->tx_ring_size = TX_RING_DEFAULT;

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		np->rx_ring.orig = pci_alloc_consistent(pci_dev,
					sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					&np->ring_addr);
		if (!np->rx_ring.orig)
			goto out_unmap;
		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
	} else {
		np->rx_ring.ex = pci_alloc_consistent(pci_dev,
					sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					&np->ring_addr);
		if (!np->rx_ring.ex)
			goto out_unmap;
		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
	}
	np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
	np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
	if (!np->rx_skb || !np->tx_skb)
		goto out_freering;

	dev->open = nv_open;
	dev->stop = nv_close;
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		dev->hard_start_xmit = nv_start_xmit;
	else
		dev->hard_start_xmit = nv_start_xmit_optimized;
	dev->get_stats = nv_get_stats;
	dev->change_mtu = nv_change_mtu;
	dev->set_mac_address = nv_set_mac_address;
	dev->set_multicast_list = nv_set_multicast;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = nv_poll_controller;
#endif
	dev->weight = RX_WORK_PER_LOOP;
#ifdef CONFIG_FORCEDETH_NAPI
	dev->poll = nv_napi_poll;
#endif
	SET_ETHTOOL_OPS(dev, &ops);
	dev->tx_timeout = nv_tx_timeout;
	dev->watchdog_timeo = NV_WATCHDOG_TIMEO;

	pci_set_drvdata(pci_dev, dev);

	/* read the mac address */
	base = get_hwbase(dev);
	np->orig_mac[0] = readl(base + NvRegMacAddrA);
	np->orig_mac[1] = readl(base + NvRegMacAddrB);

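	/* The nic historically stores the MAC address in reverse byte order;
	 * newer units set NVREG_TRANSMITPOLL_MAC_ADDR_REV to signal that the
	 * registers already hold the correct order. For example (illustrative
	 * values only), a reversed nic with address 00:01:02:03:04:05 would
	 * present orig_mac[0] = 0x02030405 and orig_mac[1] = 0x00000001.
	 */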
	/* check the workaround bit for correct mac address order */
	txreg = readl(base + NvRegTransmitPoll);
	if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
		/* mac address is already in correct order */
		dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
		dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
		dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
		dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
		dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
		dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
	} else {
		/* need to reverse mac address to correct order */
		dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
		dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
		dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
		dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
		dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
		dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
		/* set permanent address to be correct as well */
		np->orig_mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
			(dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
		np->orig_mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
		writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	if (!is_valid_ether_addr(dev->perm_addr)) {
		/*
		 * Bad mac address. At least one bios sets the mac address
		 * to 01:23:45:67:89:ab
		 */
		printk(KERN_ERR "%s: Invalid Mac address detected: %02x:%02x:%02x:%02x:%02x:%02x\n",
		       pci_name(pci_dev),
		       dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
		       dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
		printk(KERN_ERR "Please complain to your hardware vendor. Switching to a random MAC.\n");
		dev->dev_addr[0] = 0x00;
		dev->dev_addr[1] = 0x00;
		dev->dev_addr[2] = 0x6c;
		get_random_bytes(&dev->dev_addr[3], 3);
	}

	dprintk(KERN_DEBUG "%s: MAC Address %02x:%02x:%02x:%02x:%02x:%02x\n", pci_name(pci_dev),
			dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
			dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);

	/* set mac address */
	nv_copy_mac_to_hw(dev);

	/* disable WOL */
	writel(0, base + NvRegWakeUpFlags);
	np->wolenabled = 0;

	if (id->driver_data & DEV_HAS_POWER_CNTRL) {
		/* take phy and nic out of low power mode */
		powerstate = readl(base + NvRegPowerState2);
		powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
		if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 ||
		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) &&
		    pci_dev->revision >= 0xA3)
			powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
		writel(powerstate, base + NvRegPowerState2);
	}

	if (np->desc_ver == DESC_VER_1) {
		np->tx_flags = NV_TX_VALID;
	} else {
		np->tx_flags = NV_TX2_VALID;
	}
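
	/* The low bits of msi_flags encode the requested MSI-X vector count
	 * ("set number of vectors" below): three vectors in throughput mode,
	 * one shared vector in CPU mode.
	 */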
	if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
			np->msi_flags |= 0x0003;
	} else {
		np->irqmask = NVREG_IRQMASK_CPU;
		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
			np->msi_flags |= 0x0001;
	}

	if (id->driver_data & DEV_NEED_TIMERIRQ)
		np->irqmask |= NVREG_IRQ_TIMER;
	if (id->driver_data & DEV_NEED_LINKTIMER) {
		dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
		np->need_linktimer = 1;
		np->link_timeout = jiffies + LINK_TIMEOUT;
	} else {
		dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
		np->need_linktimer = 0;
	}

	/* clear phy state and temporarily halt phy interrupts */
	writel(0, base + NvRegMIIMask);
	phystate = readl(base + NvRegAdapterControl);
	if (phystate & NVREG_ADAPTCTL_RUNNING) {
		phystate_orig = 1;
		phystate &= ~NVREG_ADAPTCTL_RUNNING;
		writel(phystate, base + NvRegAdapterControl);
	}
	writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);

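	/* Some boards run a management unit on the mac (np->mac_in_use). The
	 * driver must acquire its semaphore before touching the phy; the loop
	 * below retries once per millisecond, for up to ~5 seconds.
	 */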
	if (id->driver_data & DEV_HAS_MGMT_UNIT) {
		/* management unit running on the mac? */
		if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) {
			np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST;
			dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n", pci_name(pci_dev), np->mac_in_use);
			for (i = 0; i < 5000; i++) {
				msleep(1);
				if (nv_mgmt_acquire_sema(dev)) {
					/* has the management unit already set up the phy? */
					if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
					    NVREG_XMITCTL_SYNC_PHY_INIT) {
						/* phy was initialized by the mgmt unit */
						phyinitialized = 1;
						dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n", pci_name(pci_dev));
					} else {
						/* we need to init the phy */
					}
					break;
				}
			}
		}
	}

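	/* Probe phy addresses 1..31 first and address 0 last (i == 32 wraps
	 * to 0 through the & 0x1F mask); address 0 is valid but, presumably
	 * being a common default, is only tried once everything else fails.
	 */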
	/* find a suitable phy */
	for (i = 1; i <= 32; i++) {
		int id1, id2;
		int phyaddr = i & 0x1F;

		spin_lock_irq(&np->lock);
		id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id1 < 0 || id1 == 0xffff)
			continue;
		spin_lock_irq(&np->lock);
		id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id2 < 0 || id2 == 0xffff)
			continue;

		np->phy_model = id2 & PHYID2_MODEL_MASK;
		id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
		id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
		dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
			pci_name(pci_dev), id1, id2, phyaddr);
		np->phyaddr = phyaddr;
		np->phy_oui = id1 | id2;
		break;
	}
	if (i == 33) {
		printk(KERN_INFO "%s: open: Could not find a valid PHY.\n",
		       pci_name(pci_dev));
		goto out_error;
	}

	if (!phyinitialized) {
		/* reset it */
		phy_init(dev);
	} else {
		/* see if it is a gigabit phy */
		u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
		if (mii_status & PHY_GIGABIT) {
			np->gigabit = PHY_GIGABIT;
		}
	}

	/* set default link speed settings */
	np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
	np->duplex = 0;
	np->autoneg = 1;

	err = register_netdev(dev);
	if (err) {
		printk(KERN_INFO "forcedeth: unable to register netdev: %d\n", err);
		goto out_error;
	}
	printk(KERN_INFO "%s: forcedeth.c: subsystem: %05x:%04x bound to %s\n",
	       dev->name, pci_dev->subsystem_vendor, pci_dev->subsystem_device,
	       pci_name(pci_dev));

	return 0;

out_error:
	if (phystate_orig)
		writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
	pci_set_drvdata(pci_dev, NULL);
out_freering:
	free_rings(dev);
out_unmap:
	iounmap(get_hwbase(dev));
out_relreg:
	pci_release_regions(pci_dev);
out_disable:
	pci_disable_device(pci_dev);
out_free:
	free_netdev(dev);
out:
	return err;
}

static void __devexit nv_remove(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	unregister_netdev(dev);

	/* special op: write back the misordered MAC address - otherwise
	 * the next nv_probe would see a wrong address.
	 */
	writel(np->orig_mac[0], base + NvRegMacAddrA);
	writel(np->orig_mac[1], base + NvRegMacAddrB);

	/* free all structures */
	free_rings(dev);
	iounmap(get_hwbase(dev));
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);
	free_netdev(dev);
	pci_set_drvdata(pci_dev, NULL);
}

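/*
 * Suspend/resume simply closes and reopens the interface (hence the
 * "Gross" remark below): nv_suspend quiesces the hardware via nv_close,
 * and nv_resume re-runs the full nv_open bring-up after power-up.
 */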
#ifdef CONFIG_PM
static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct fe_priv *np = netdev_priv(dev);

	if (!netif_running(dev))
		goto out;

	netif_device_detach(dev);

	/* Gross. */
	nv_close(dev);

	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
out:
	return 0;
}

static int nv_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	int rc = 0;

	if (!netif_running(dev))
		goto out;

	netif_device_attach(dev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);

	rc = nv_open(dev);
out:
	return rc;
}
#else
#define nv_suspend NULL
#define nv_resume NULL
#endif /* CONFIG_PM */

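/*
 * PCI device table: each entry maps an nForce/MCP device ID to the set of
 * DEV_* feature flags that nv_probe uses to pick the descriptor format,
 * DMA mask, checksum/VLAN/MSI support and other per-chipset quirks.
 */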
static struct pci_device_id pci_tbl[] = {
	{ /* nForce Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{ /* nForce2 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{ /* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{ /* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{ /* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{ /* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{ /* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{ /* CK804 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
	},
	{ /* CK804 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
	},
	{ /* MCP04 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
	},
	{ /* MCP04 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
	},
	{ /* MCP51 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
	},
	{ /* MCP51 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
	},
	{ /* MCP55 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{ /* MCP55 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{ /* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{ /* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{ /* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{ /* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{ /* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{ /* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{ /* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{ /* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{ /* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{ /* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{ /* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{ /* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{0,},
};

static struct pci_driver driver = {
	.name = "forcedeth",
	.id_table = pci_tbl,
	.probe = nv_probe,
	.remove = __devexit_p(nv_remove),
	.suspend = nv_suspend,
	.resume = nv_resume,
};

static int __init init_nic(void)
{
	printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION);
	return pci_register_driver(&driver);
}

static void __exit exit_nic(void)
{
	pci_unregister_driver(&driver);
}

module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
module_param(optimization_mode, int, 0);
MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
module_param(poll_interval, int, 0);
MODULE_PARM_DESC(poll_interval, "Timer interrupt interval, in units of (time_in_microseconds * 100) / 2^10. Min is 0, max is 65535.");
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(msix, int, 0);
MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(dma_64bit, int, 0);
MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
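
/* Example (illustrative values, not a recommendation): load the driver in
 * CPU/timer interrupt mode with MSI enabled:
 *
 *   modprobe forcedeth optimization_mode=1 msi=1
 */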

MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
MODULE_LICENSE("GPL");

MODULE_DEVICE_TABLE(pci, pci_tbl);

module_init(init_nic);
module_exit(exit_nic);