/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 * engineered documentation written by Carl-Daniel Hailfinger
 * and Andrew de Quincey.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4,5 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *		IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004,5,6 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * Changelog:
 *	0.01: 05 Oct 2003: First release that compiles without warnings.
 *	0.02: 05 Oct 2003: Fix bug for nv_drain_tx: do not try to free NULL skbs.
 *			   Check all PCI BARs for the register window.
 *			   udelay added to mii_rw.
 *	0.03: 06 Oct 2003: Initialize dev->irq.
 *	0.04: 07 Oct 2003: Initialize np->lock, reduce handled irqs, add printks.
 *	0.05: 09 Oct 2003: printk removed again, irq status print tx_timeout.
 *	0.06: 10 Oct 2003: MAC Address read updated, pff flag generation updated,
 *			   irq mask updated.
 *	0.07: 14 Oct 2003: Further irq mask updates.
 *	0.08: 20 Oct 2003: rx_desc.Length initialization added, nv_alloc_rx refill
 *			   added into irq handler, NULL check for drain_ring.
 *	0.09: 20 Oct 2003: Basic link speed irq implementation. Only handle the
 *			   requested interrupt sources.
 *	0.10: 20 Oct 2003: First cleanup for release.
 *	0.11: 21 Oct 2003: hexdump for tx added, rx buffer sizes increased.
 *			   MAC Address init fix, set_multicast cleanup.
 *	0.12: 23 Oct 2003: Cleanups for release.
 *	0.13: 25 Oct 2003: Limit for concurrent tx packets increased to 10.
 *			   Set link speed correctly. start rx before starting
 *			   tx (nv_start_rx sets the link speed).
 *	0.14: 25 Oct 2003: Nic dependent irq mask.
 *	0.15: 08 Nov 2003: fix smp deadlock with set_multicast_list during
 *			   open.
 *	0.16: 15 Nov 2003: include file cleanup for ppc64, rx buffer size
 *			   increased to 1628 bytes.
 *	0.17: 16 Nov 2003: undo rx buffer size increase. Subtract 1 from
 *			   macaddr length.
 *	0.18: 17 Nov 2003: fix oops due to late initialization of dev_stats
 *	0.19: 29 Nov 2003: Handle RxNoBuf, detect & handle invalid mac
 *			   addresses, really stop rx if already running
 *			   in nv_start_rx, clean up a bit.
 *	0.20: 07 Dec 2003: alloc fixes
 *	0.21: 12 Jan 2004: additional alloc fix, nic polling fix.
 *	0.22: 19 Jan 2004: reprogram timer to a sane rate, avoid lockup
 *			   on close.
 *	0.23: 26 Jan 2004: various small cleanups
 *	0.24: 27 Feb 2004: make driver even less anonymous in backtraces
 *	0.25: 09 Mar 2004: wol support
 *	0.26: 03 Jun 2004: netdriver specific annotation, sparse-related fixes
 *	0.27: 19 Jun 2004: Gigabit support, new descriptor rings,
 *			   added CK804/MCP04 device IDs, code fixes
 *			   for registers, link status and other minor fixes.
 *	0.28: 21 Jun 2004: Big cleanup, making driver mostly endian safe
 *	0.29: 31 Aug 2004: Add backup timer for link change notification.
 *	0.30: 25 Sep 2004: rx checksum support for nf 250 Gb. Add rx reset
 *			   into nv_close, otherwise reenabling for wol can
 *			   cause DMA to kfree'd memory.
 *	0.31: 14 Nov 2004: ethtool support for getting/setting link
 *			   settings.
 *	0.32: 16 Apr 2005: RX_ERROR4 handling added.
 *	0.33: 16 May 2005: Support for MCP51 added.
 *	0.34: 18 Jun 2005: Add DEV_NEED_LINKTIMER to all nForce nics.
 *	0.35: 26 Jun 2005: Support for MCP55 added.
 *	0.36: 28 Jun 2005: Add jumbo frame support.
 *	0.37: 10 Jul 2005: Additional ethtool support, cleanup of pci id list
 *	0.38: 16 Jul 2005: tx irq rewrite: Use global flags instead of
 *			   per-descriptor flags.
 *	0.39: 18 Jul 2005: Add 64bit descriptor support.
 *	0.40: 19 Jul 2005: Add support for mac address change.
 *	0.41: 30 Jul 2005: Write back original MAC in nv_close instead
 *			   of nv_remove.
 *	0.42: 06 Aug 2005: Fix lack of link speed initialization
 *			   in the second (and later) nv_open call
 *	0.43: 10 Aug 2005: Add support for tx checksum.
 *	0.44: 20 Aug 2005: Add support for scatter gather and segmentation.
 *	0.45: 18 Sep 2005: Remove nv_stop/start_rx from every link check
 *	0.46: 20 Oct 2005: Add irq optimization modes.
 *	0.47: 26 Oct 2005: Add phyaddr 0 in phy scan.
 *	0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single
 *	0.49: 10 Dec 2005: Fix tso for large buffers.
 *	0.50: 20 Jan 2006: Add 8021pq tagging support.
 *	0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
 *	0.52: 20 Jan 2006: Add MSI/MSIX support.
 *	0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
 *	0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
 *	0.55: 22 Mar 2006: Add flow control (pause frame).
 *	0.56: 22 Mar 2006: Additional ethtool config and moduleparam support.
 *	0.57: 14 May 2006: Mac address set in probe/remove and order corrections.
 *	0.58: 30 Oct 2006: Added support for sideband management unit.
 *	0.59: 30 Oct 2006: Added support for recoverable error.
 *	0.60: 20 Jan 2007: Code optimizations for rings, rx & tx data paths, and stats.
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */
#ifdef CONFIG_FORCEDETH_NAPI
#define DRIVERNAPI "-NAPI"
#else
#define DRIVERNAPI ""
#endif
#define FORCEDETH_VERSION	"0.60"
#define DRV_NAME		"forcedeth"
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#if 0
#define dprintk			printk
#else
#define dprintk(x...)		do { } while (0)
#endif
#define DEV_NEED_TIMERIRQ	0x0001	/* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER	0x0002	/* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC	0x0004	/* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA	0x0008	/* device supports 64bit dma */
#define DEV_HAS_CHECKSUM	0x0010	/* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN		0x0020	/* device supports vlan tagging and striping */
#define DEV_HAS_MSI		0x0040	/* device supports MSI */
#define DEV_HAS_MSI_X		0x0080	/* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL	0x0100	/* device supports power savings */
#define DEV_HAS_PAUSEFRAME_TX	0x0200	/* device supports tx pause frames */
#define DEV_HAS_STATISTICS_V1	0x0400	/* device supports hw statistics version 1 */
#define DEV_HAS_STATISTICS_V2	0x0800	/* device supports hw statistics version 2 */
#define DEV_HAS_TEST_EXTENDED	0x1000	/* device supports extended diagnostic test */
#define DEV_HAS_MGMT_UNIT	0x2000	/* device supports management unit */
enum {
	NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT	0x040
#define NVREG_IRQSTAT_MASK	0x81ff
	NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR		0x0001
#define NVREG_IRQ_RX			0x0002
#define NVREG_IRQ_RX_NOBUF		0x0004
#define NVREG_IRQ_TX_ERR		0x0008
#define NVREG_IRQ_TX_OK			0x0010
#define NVREG_IRQ_TIMER			0x0020
#define NVREG_IRQ_LINK			0x0040
#define NVREG_IRQ_RX_FORCED		0x0080
#define NVREG_IRQ_TX_FORCED		0x0100
#define NVREG_IRQ_RECOVER_ERROR		0x8000
#define NVREG_IRQMASK_THROUGHPUT	0x00df
#define NVREG_IRQMASK_CPU		0x0060
#define NVREG_IRQ_TX_ALL		(NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL		(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER			(NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)

#define NVREG_IRQ_UNKNOWN	(~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
					NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RX_FORCED| \
					NVREG_IRQ_TX_FORCED|NVREG_IRQ_RECOVER_ERROR))

	NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL	3

/*
 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 */
	NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT	970 /* backup tx cleanup if loop max reached */
#define NVREG_POLL_DEFAULT_CPU	13
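/*
 * Using the 97 == 1 ms scaling stated above, the two defaults work out to
 * roughly 10 ms for 970 (the throughput-mode backup tx cleanup) and roughly
 * 0.13 ms for 13 (cpu mode).
 */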
	NvRegMSIMap0 = 0x020,
	NvRegMSIMap1 = 0x024,
	NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED 0x01
	NvRegMisc1 = 0x080,
#define NVREG_MISC1_PAUSE_TX	0x01
#define NVREG_MISC1_HD		0x02
#define NVREG_MISC1_FORCE	0x3b0f3c

	NvRegMacReset = 0x3c,
#define NVREG_MAC_RESET_ASSERT	0x0F3
	NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START	0x01
#define NVREG_XMITCTL_MGMT_ST	0x40000000
#define NVREG_XMITCTL_SYNC_MASK		0x000f0000
#define NVREG_XMITCTL_SYNC_NOT_READY	0x0
#define NVREG_XMITCTL_SYNC_PHY_INIT	0x00040000
#define NVREG_XMITCTL_MGMT_SEMA_MASK	0x00000f00
#define NVREG_XMITCTL_MGMT_SEMA_FREE	0x0
#define NVREG_XMITCTL_HOST_SEMA_MASK	0x0000f000
#define NVREG_XMITCTL_HOST_SEMA_ACQ	0x0000f000
#define NVREG_XMITCTL_HOST_LOADED	0x00004000
#define NVREG_XMITCTL_TX_PATH_EN	0x01000000
	NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY	0x01

	NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_PAUSE_RX	0x08
#define NVREG_PFF_ALWAYS	0x7F0000
#define NVREG_PFF_PROMISC	0x80
#define NVREG_PFF_MYADDR	0x20
#define NVREG_PFF_LOOPBACK	0x10

	NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY	0x601
#define NVREG_OFFLOAD_NORMAL	RX_NIC_BUFSIZE
	NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START	0x01
#define NVREG_RCVCTL_RX_PATH_EN	0x01000000
	NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY	0x01

	NvRegRandomSeed = 0x9c,
#define NVREG_RNDSEED_MASK	0x00ff
#define NVREG_RNDSEED_FORCE	0x7f00
#define NVREG_RNDSEED_FORCE2	0x2d00
#define NVREG_RNDSEED_FORCE3	0x7400

	NvRegTxDeferral = 0xA0,
#define NVREG_TX_DEFERRAL_DEFAULT	0x15050f
#define NVREG_TX_DEFERRAL_RGMII_10_100	0x16070f
#define NVREG_TX_DEFERRAL_RGMII_1000	0x14050f
	NvRegRxDeferral = 0xA4,
#define NVREG_RX_DEFERRAL_DEFAULT	0x16
	NvRegMacAddrA = 0xA8,
	NvRegMacAddrB = 0xAC,
	NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE	0x01
	NvRegMulticastAddrB = 0xB4,
	NvRegMulticastMaskA = 0xB8,
	NvRegMulticastMaskB = 0xBC,

	NvRegPhyInterface = 0xC0,
#define PHY_RGMII		0x10000000

	NvRegTxRingPhysAddr = 0x100,
	NvRegRxRingPhysAddr = 0x104,
	NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT 0
#define NVREG_RINGSZ_RXSHIFT 16
	NvRegTransmitPoll = 0x10c,
#define NVREG_TRANSMITPOLL_MAC_ADDR_REV	0x00008000
	NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE	0x10000
#define NVREG_LINKSPEED_10	1000
#define NVREG_LINKSPEED_100	100
#define NVREG_LINKSPEED_1000	50
#define NVREG_LINKSPEED_MASK	(0xFFF)
	NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31	(1<<31)
	NvRegTxWatermark = 0x13c,
#define NVREG_TX_WM_DESC1_DEFAULT	0x0200010
#define NVREG_TX_WM_DESC2_3_DEFAULT	0x1e08000
#define NVREG_TX_WM_DESC2_3_1000	0xfe08000
	NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK	0x0001
#define NVREG_TXRXCTL_BIT1	0x0002
#define NVREG_TXRXCTL_BIT2	0x0004
#define NVREG_TXRXCTL_IDLE	0x0008
#define NVREG_TXRXCTL_RESET	0x0010
#define NVREG_TXRXCTL_RXCHECK	0x0400
#define NVREG_TXRXCTL_DESC_1	0
#define NVREG_TXRXCTL_DESC_2	0x002100
#define NVREG_TXRXCTL_DESC_3	0xc02200
#define NVREG_TXRXCTL_VLANSTRIP 0x00040
#define NVREG_TXRXCTL_VLANINS	0x00080
	NvRegTxRingPhysAddrHigh = 0x148,
	NvRegRxRingPhysAddrHigh = 0x14C,
	NvRegTxPauseFrame = 0x170,
#define NVREG_TX_PAUSEFRAME_DISABLE	0x1ff0080
#define NVREG_TX_PAUSEFRAME_ENABLE	0x0c00030
	NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR		0x0001
#define NVREG_MIISTAT_LINKCHANGE	0x0008
#define NVREG_MIISTAT_MASK		0x000f
#define NVREG_MIISTAT_MASK2		0x000f
	NvRegMIIMask = 0x184,
#define NVREG_MII_LINKCHANGE		0x0008

	NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START	0x02
#define NVREG_ADAPTCTL_LINKUP	0x04
#define NVREG_ADAPTCTL_PHYVALID	0x40000
#define NVREG_ADAPTCTL_RUNNING	0x100000
#define NVREG_ADAPTCTL_PHYSHIFT	24
	NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8	(1<<8)
#define NVREG_MIIDELAY	5
	NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE	0x08000
#define NVREG_MIICTL_WRITE	0x00400
#define NVREG_MIICTL_ADDRSHIFT	5
	NvRegMIIData = 0x194,
	NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL		0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT	24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT	16
#define NVREG_WAKEUPFLAGS_D3SHIFT	12
#define NVREG_WAKEUPFLAGS_D2SHIFT	8
#define NVREG_WAKEUPFLAGS_D1SHIFT	4
#define NVREG_WAKEUPFLAGS_D0SHIFT	0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT		0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT	0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE	0x04
#define NVREG_WAKEUPFLAGS_ENABLE	0x1111

	NvRegPatternCRC = 0x204,
	NvRegPatternMask = 0x208,
	NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP	(1<<30)
#define NVREG_POWERCAP_D2SUPP	(1<<26)
#define NVREG_POWERCAP_D1SUPP	(1<<25)
	NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP	0x8000
#define NVREG_POWERSTATE_VALID		0x0100
#define NVREG_POWERSTATE_MASK		0x0003
#define NVREG_POWERSTATE_D0		0x0000
#define NVREG_POWERSTATE_D1		0x0001
#define NVREG_POWERSTATE_D2		0x0002
#define NVREG_POWERSTATE_D3		0x0003
	NvRegTxCnt = 0x280,
	NvRegTxZeroReXmt = 0x284,
	NvRegTxOneReXmt = 0x288,
	NvRegTxManyReXmt = 0x28c,
	NvRegTxLateCol = 0x290,
	NvRegTxUnderflow = 0x294,
	NvRegTxLossCarrier = 0x298,
	NvRegTxExcessDef = 0x29c,
	NvRegTxRetryErr = 0x2a0,
	NvRegRxFrameErr = 0x2a4,
	NvRegRxExtraByte = 0x2a8,
	NvRegRxLateCol = 0x2ac,
	NvRegRxRunt = 0x2b0,
	NvRegRxFrameTooLong = 0x2b4,
	NvRegRxOverflow = 0x2b8,
	NvRegRxFCSErr = 0x2bc,
	NvRegRxFrameAlignErr = 0x2c0,
	NvRegRxLenErr = 0x2c4,
	NvRegRxUnicast = 0x2c8,
	NvRegRxMulticast = 0x2cc,
	NvRegRxBroadcast = 0x2d0,
	NvRegTxDef = 0x2d4,
	NvRegTxFrame = 0x2d8,
	NvRegRxCnt = 0x2dc,
	NvRegTxPause = 0x2e0,
	NvRegRxPause = 0x2e4,
	NvRegRxDropFrame = 0x2e8,
	NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE	0x2000
	NvRegMSIXMap0 = 0x3e0,
	NvRegMSIXMap1 = 0x3e4,
	NvRegMSIXIrqStatus = 0x3f0,

	NvRegPowerState2 = 0x600,
#define NVREG_POWERSTATE2_POWERUP_MASK		0x0F11
#define NVREG_POWERSTATE2_POWERUP_REV_A3	0x0001
};
/* Big endian: should work, but is untested */
struct ring_desc {
	__le32 buf;
	__le32 flaglen;
};

struct ring_desc_ex {
	__le32 bufhigh;
	__le32 buflow;
	__le32 txvlan;
	__le32 flaglen;
};

union ring_type {
	struct ring_desc *orig;
	struct ring_desc_ex *ex;
};
#define FLAG_MASK_V1	0xffff0000
#define FLAG_MASK_V2	0xffffc000
#define LEN_MASK_V1	(0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2	(0xffffffff ^ FLAG_MASK_V2)
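/*
 * For reference: the XOR above just inverts the flag mask, so
 * LEN_MASK_V1 == 0x0000ffff (the low 16 bits of flaglen carry the length)
 * and LEN_MASK_V2 == 0x00003fff (low 14 bits), leaving the upper bits for
 * the NV_TX and NV_RX status flags defined below.
 */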
#define NV_TX_LASTPACKET	(1<<16)
#define NV_TX_RETRYERROR	(1<<19)
#define NV_TX_FORCED_INTERRUPT	(1<<24)
#define NV_TX_DEFERRED		(1<<26)
#define NV_TX_CARRIERLOST	(1<<27)
#define NV_TX_LATECOLLISION	(1<<28)
#define NV_TX_UNDERFLOW		(1<<29)
#define NV_TX_ERROR		(1<<30)
#define NV_TX_VALID		(1<<31)

#define NV_TX2_LASTPACKET	(1<<29)
#define NV_TX2_RETRYERROR	(1<<18)
#define NV_TX2_FORCED_INTERRUPT	(1<<30)
#define NV_TX2_DEFERRED		(1<<25)
#define NV_TX2_CARRIERLOST	(1<<26)
#define NV_TX2_LATECOLLISION	(1<<27)
#define NV_TX2_UNDERFLOW	(1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR		(1<<30)
#define NV_TX2_VALID		(1<<31)
#define NV_TX2_TSO		(1<<28)
#define NV_TX2_TSO_SHIFT	14
#define NV_TX2_TSO_MAX_SHIFT	14
#define NV_TX2_TSO_MAX_SIZE	(1<<NV_TX2_TSO_MAX_SHIFT)
#define NV_TX2_CHECKSUM_L3	(1<<27)
#define NV_TX2_CHECKSUM_L4	(1<<26)

#define NV_TX3_VLAN_TAG_PRESENT (1<<18)

#define NV_RX_DESCRIPTORVALID	(1<<16)
#define NV_RX_MISSEDFRAME	(1<<17)
#define NV_RX_SUBSTRACT1	(1<<18)
#define NV_RX_ERROR1		(1<<23)
#define NV_RX_ERROR2		(1<<24)
#define NV_RX_ERROR3		(1<<25)
#define NV_RX_ERROR4		(1<<26)
#define NV_RX_CRCERR		(1<<27)
#define NV_RX_OVERFLOW		(1<<28)
#define NV_RX_FRAMINGERR	(1<<29)
#define NV_RX_ERROR		(1<<30)
#define NV_RX_AVAIL		(1<<31)

#define NV_RX2_CHECKSUMMASK	(0x1C000000)
#define NV_RX2_CHECKSUMOK1	(0x10000000)
#define NV_RX2_CHECKSUMOK2	(0x14000000)
#define NV_RX2_CHECKSUMOK3	(0x18000000)
#define NV_RX2_DESCRIPTORVALID	(1<<29)
#define NV_RX2_SUBSTRACT1	(1<<25)
#define NV_RX2_ERROR1		(1<<18)
#define NV_RX2_ERROR2		(1<<19)
#define NV_RX2_ERROR3		(1<<20)
#define NV_RX2_ERROR4		(1<<21)
#define NV_RX2_CRCERR		(1<<22)
#define NV_RX2_OVERFLOW		(1<<23)
#define NV_RX2_FRAMINGERR	(1<<24)
/* error and avail are the same for both */
#define NV_RX2_ERROR		(1<<30)
#define NV_RX2_AVAIL		(1<<31)

#define NV_RX3_VLAN_TAG_PRESENT (1<<16)
#define NV_RX3_VLAN_TAG_MASK	(0x0000FFFF)
/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ_VER1	0x270
#define NV_PCI_REGSZ_VER2	0x2d4
#define NV_PCI_REGSZ_VER3	0x604
/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY	4
#define NV_TXSTOP_DELAY1	10
#define NV_TXSTOP_DELAY1MAX	500000
#define NV_TXSTOP_DELAY2	100
#define NV_RXSTOP_DELAY1	10
#define NV_RXSTOP_DELAY1MAX	500000
#define NV_RXSTOP_DELAY2	100
#define NV_SETUP5_DELAY		5
#define NV_SETUP5_DELAYMAX	50000
#define NV_POWERUP_DELAY	5
#define NV_POWERUP_DELAYMAX	5000
#define NV_MIIBUSY_DELAY	50
#define NV_MIIPHY_DELAY		10
#define NV_MIIPHY_DELAYMAX	10000
#define NV_MAC_RESET_DELAY	64

#define NV_WAKEUPPATTERNS	5
#define NV_WAKEUPMASKENTRIES	4
/* General driver defaults */
#define NV_WATCHDOG_TIMEO	(5*HZ)

#define RX_RING_DEFAULT		128
#define TX_RING_DEFAULT		256
#define RX_RING_MIN		128
#define TX_RING_MIN		64
#define RING_MAX_DESC_VER_1	1024
#define RING_MAX_DESC_VER_2_3	16384

/* rx/tx mac addr + type + vlan + align + slack*/
#define NV_RX_HEADERS		(64)
/* even more slack. */
#define NV_RX_ALLOC_PAD		(64)

/* maximum mtu size */
#define NV_PKTLIMIT_1	ETH_DATA_LEN	/* hard limit not known */
#define NV_PKTLIMIT_2	9100	/* Actual limit according to NVidia: 9202 */

#define OOM_REFILL	(1+HZ/20)
#define POLL_WAIT	(1+HZ/100)
#define LINK_TIMEOUT	(3*HZ)
#define STATS_INTERVAL	(10*HZ)
/*
 * desc_ver values:
 * The nic supports three different descriptor types:
 * - DESC_VER_1: Original
 * - DESC_VER_2: support for jumbo frames.
 * - DESC_VER_3: 64-bit format.
 */
#define DESC_VER_1	1
#define DESC_VER_2	2
#define DESC_VER_3	3
/* PHY defines */
#define PHY_OUI_MARVELL	0x5043
#define PHY_OUI_CICADA	0x03f1
#define PHY_OUI_VITESSE	0x01c1
#define PHY_OUI_REALTEK	0x0732
#define PHYID1_OUI_MASK	0x03ff
#define PHYID1_OUI_SHFT	6
#define PHYID2_OUI_MASK	0xfc00
#define PHYID2_OUI_SHFT	10
#define PHYID2_MODEL_MASK		0x03f0
#define PHY_MODEL_MARVELL_E3016		0x220
#define PHY_MARVELL_E3016_INITMASK	0x0300
#define PHY_CICADA_INIT1	0x0f000
#define PHY_CICADA_INIT2	0x0e00
#define PHY_CICADA_INIT3	0x01000
#define PHY_CICADA_INIT4	0x0200
#define PHY_CICADA_INIT5	0x0004
#define PHY_CICADA_INIT6	0x02000
#define PHY_VITESSE_INIT_REG1	0x1f
#define PHY_VITESSE_INIT_REG2	0x10
#define PHY_VITESSE_INIT_REG3	0x11
#define PHY_VITESSE_INIT_REG4	0x12
#define PHY_VITESSE_INIT_MSK1	0xc
#define PHY_VITESSE_INIT_MSK2	0x0180
#define PHY_VITESSE_INIT1	0x52b5
#define PHY_VITESSE_INIT2	0xaf8a
#define PHY_VITESSE_INIT3	0x8
#define PHY_VITESSE_INIT4	0x8f8a
#define PHY_VITESSE_INIT5	0xaf86
#define PHY_VITESSE_INIT6	0x8f86
#define PHY_VITESSE_INIT7	0xaf82
#define PHY_VITESSE_INIT8	0x0100
#define PHY_VITESSE_INIT9	0x8f82
#define PHY_VITESSE_INIT10	0x0
#define PHY_REALTEK_INIT_REG1	0x1f
#define PHY_REALTEK_INIT_REG2	0x19
#define PHY_REALTEK_INIT_REG3	0x13
#define PHY_REALTEK_INIT1	0x0000
#define PHY_REALTEK_INIT2	0x8e00
#define PHY_REALTEK_INIT3	0x0001
#define PHY_REALTEK_INIT4	0xad17

#define PHY_GIGABIT	0x0100

#define PHY_TIMEOUT	0x1
#define PHY_ERROR	0x2
#define PHY_HALF	0x100

/* flow control */
#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
#define NV_PAUSEFRAME_RX_ENABLE  0x0004
#define NV_PAUSEFRAME_TX_ENABLE  0x0008
#define NV_PAUSEFRAME_RX_REQ     0x0010
#define NV_PAUSEFRAME_TX_REQ     0x0020
#define NV_PAUSEFRAME_AUTONEG    0x0040
/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS	8
#define NV_MSI_X_VECTORS_MASK	0x000f
#define NV_MSI_CAPABLE		0x0010
#define NV_MSI_X_CAPABLE	0x0020
#define NV_MSI_ENABLED		0x0040
#define NV_MSI_X_ENABLED	0x0080

#define NV_MSI_X_VECTOR_ALL	0x0
#define NV_MSI_X_VECTOR_RX	0x0
#define NV_MSI_X_VECTOR_TX	0x1
#define NV_MSI_X_VECTOR_OTHER	0x2
/* statistics */
struct nv_ethtool_str {
	char name[ETH_GSTRING_LEN];
};

static const struct nv_ethtool_str nv_estats_str[] = {
	{ "tx_bytes" },
	{ "tx_zero_rexmt" },
	{ "tx_one_rexmt" },
	{ "tx_many_rexmt" },
	{ "tx_late_collision" },
	{ "tx_fifo_errors" },
	{ "tx_carrier_errors" },
	{ "tx_excess_deferral" },
	{ "tx_retry_error" },
	{ "rx_frame_error" },
	{ "rx_extra_byte" },
	{ "rx_late_collision" },
	{ "rx_runt" },
	{ "rx_frame_too_long" },
	{ "rx_over_errors" },
	{ "rx_crc_errors" },
	{ "rx_frame_align_error" },
	{ "rx_length_error" },
	{ "rx_unicast" },
	{ "rx_multicast" },
	{ "rx_broadcast" },
	{ "rx_packets" },
	{ "rx_errors_total" },
	{ "tx_errors_total" },

	/* version 2 stats */
	{ "tx_deferral" },
	{ "tx_packets" },
	{ "rx_bytes" },
	{ "tx_pause" },
	{ "rx_pause" },
	{ "rx_drop_frame" },
};
struct nv_ethtool_stats {
	u64 tx_bytes;
	u64 tx_zero_rexmt;
	u64 tx_one_rexmt;
	u64 tx_many_rexmt;
	u64 tx_late_collision;
	u64 tx_fifo_errors;
	u64 tx_carrier_errors;
	u64 tx_excess_deferral;
	u64 tx_retry_error;
	u64 rx_frame_error;
	u64 rx_extra_byte;
	u64 rx_late_collision;
	u64 rx_runt;
	u64 rx_frame_too_long;
	u64 rx_over_errors;
	u64 rx_crc_errors;
	u64 rx_frame_align_error;
	u64 rx_length_error;
	u64 rx_unicast;
	u64 rx_multicast;
	u64 rx_broadcast;
	u64 rx_packets;
	u64 rx_errors_total;
	u64 tx_errors_total;

	/* version 2 stats */
	u64 tx_deferral;
	u64 tx_packets;
	u64 rx_bytes;
	u64 tx_pause;
	u64 rx_pause;
	u64 rx_drop_frame;
};

#define NV_DEV_STATISTICS_V2_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
/* diagnostics */
#define NV_TEST_COUNT_BASE	3
#define NV_TEST_COUNT_EXTENDED	4

static const struct nv_ethtool_str nv_etests_str[] = {
	{ "link      (online/offline)" },
	{ "register  (offline)       " },
	{ "interrupt (offline)       " },
	{ "loopback  (offline)       " }
};
struct register_test {
	u32 reg;
	u32 mask;
};

static const struct register_test nv_registers_test[] = {
	{ NvRegUnknownSetupReg6, 0x01 },
	{ NvRegMisc1, 0x03c },
	{ NvRegOffloadConfig, 0x03ff },
	{ NvRegMulticastAddrA, 0xffffffff },
	{ NvRegTxWatermark, 0x0ff },
	{ NvRegWakeUpFlags, 0x07777 },
	{ 0, 0 }
};

struct nv_skb_map {
	struct sk_buff *skb;
	dma_addr_t dma;
	unsigned int dma_len;
};
/*
 * SMP locking:
 * All hardware access under dev->priv->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *	by the arch code for interrupts.
 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
 *	needs dev->priv->lock :-(
 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
 */
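/*
 * Concrete example of the tx rule (see nv_start_xmit below): the descriptors
 * and DMA mappings are filled in without np->lock, under netif_tx_lock only;
 * the final flag write that hands the packet to the hardware is done inside
 * spin_lock_irq(&np->lock).
 */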
/*
 * in dev: base, irq
 */
struct fe_priv {
	spinlock_t lock;

	/* General data:
	 * Locking: spin_lock(&np->lock); */
	struct net_device_stats stats;
	struct nv_ethtool_stats estats;
	int in_shutdown;
	u32 linkspeed;
	int duplex;
	int phyaddr;
	unsigned int phy_oui;
	unsigned int phy_model;
	u16 gigabit;

	/* General data: RO fields */
	dma_addr_t ring_addr;
	struct pci_dev *pci_dev;
	u32 desc_ver;
	u32 txrxctl_bits;
	u32 driver_data;
	u32 mac_in_use;

	u8 __iomem *base;

	/* rx specific fields.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	union ring_type get_rx, put_rx, first_rx, last_rx;
	struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
	struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
	struct nv_skb_map *rx_skb;

	union ring_type rx_ring;
	unsigned int rx_buf_sz;
	unsigned int pkt_limit;
	struct timer_list oom_kick;
	struct timer_list nic_poll;
	struct timer_list stats_poll;
	int rx_ring_size;

	/* media detection workaround.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	unsigned long link_timeout;
	/*
	 * tx specific fields.
	 */
	union ring_type get_tx, put_tx, first_tx, last_tx;
	struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
	struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
	struct nv_skb_map *tx_skb;

	union ring_type tx_ring;
	u32 tx_flags;
	int tx_ring_size;

	/* vlan fields */
	struct vlan_group *vlangrp;

	/* msi/msi-x fields */
	u32 msi_flags;
	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];
};
/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work = 5;

/*
 * Optimization can be either throughput mode or cpu mode
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 */
enum {
	NV_OPTIMIZATION_MODE_THROUGHPUT,
	NV_OPTIMIZATION_MODE_CPU
};
static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
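/*
 * Example (assuming the usual module_param() hookup for these settings,
 * which is not shown in this excerpt):
 *	modprobe forcedeth optimization_mode=1
 * would select NV_OPTIMIZATION_MODE_CPU, so rx/tx work is batched by the
 * NvRegPollingInterval timer instead of per-packet interrupts.
 */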
/*
 * Poll interval for timer irq
 *
 * This interval determines how frequently the timer interrupt is generated.
 * The value is determined by [(time_in_micro_secs * 100) / (2^10)]
 * Min = 0, and Max = 65535
 */
static int poll_interval = -1;
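/*
 * Worked example (from the formula above): for a 1 ms period,
 * poll_interval = (1000 * 100) / 1024 = 97, matching the NVREG_POLL_DEFAULT
 * comment near NvRegPollingInterval; the -1 default presumably means
 * "use the per-mode default" rather than a user-supplied value.
 */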
/*
 * MSI interrupts
 */
enum {
	NV_MSI_INT_DISABLED,
	NV_MSI_INT_ENABLED
};
static int msi = NV_MSI_INT_ENABLED;

/*
 * MSIX interrupts
 */
enum {
	NV_MSIX_INT_DISABLED,
	NV_MSIX_INT_ENABLED
};
static int msix = NV_MSIX_INT_DISABLED;

/*
 * DMA 64bit
 */
enum {
	NV_DMA_64BIT_DISABLED,
	NV_DMA_64BIT_ENABLED
};
static int dma_64bit = NV_DMA_64BIT_ENABLED;
static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
	return ((struct fe_priv *)netdev_priv(dev))->base;
}

static inline void pci_push(u8 __iomem *base)
{
	/* force out pending posted writes */
	readl(base);
}
static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen)
		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}

static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
}
static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
		     int delay, int delaymax, const char *msg)
{
	u8 __iomem *base = get_hwbase(dev);

	pci_push(base);
	do {
		udelay(delay);
		delaymax -= delay;
		if (delaymax < 0) {
			if (msg)
				printk(msg);
			return 1;
		}
	} while ((readl(base + offset) & mask) != target);
	return 0;
}
#define NV_SETUP_RX_RING 0x01
#define NV_SETUP_TX_RING 0x02

static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
		}
	} else {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
			writel((u32) (cpu_to_le64(np->ring_addr) >> 32), base + NvRegRxRingPhysAddrHigh);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
			writel((u32) (cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)) >> 32), base + NvRegTxRingPhysAddrHigh);
		}
	}
}
static void free_rings(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		if (np->rx_ring.orig)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.orig, np->ring_addr);
	} else {
		if (np->rx_ring.ex)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.ex, np->ring_addr);
	}
	if (np->rx_skb)
		kfree(np->rx_skb);
	if (np->tx_skb)
		kfree(np->tx_skb);
}
static int using_multi_irqs(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
		return 0;
	else
		return 1;
}
static void nv_enable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}
static void nv_disable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}
/* In MSIX mode, a write to irqmask behaves as XOR */
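/*
 * That XOR behaviour is why nv_disable_hw_interrupts() below re-writes the
 * currently enabled mask (toggling those bits back off) instead of simply
 * writing 0 when MSI-X is enabled.
 */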
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
{
	u8 __iomem *base = get_hwbase(dev);

	writel(mask, base + NvRegIrqMask);
}

static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		writel(mask, base + NvRegIrqMask);
	} else {
		if (np->msi_flags & NV_MSI_ENABLED)
			writel(0, base + NvRegMSIIrqMask);
		writel(0, base + NvRegIrqMask);
	}
}
#define MII_READ	(-1)
/* mii_rw: read/write a register on the PHY.
 *
 * Caller must guarantee serialization
 */
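/*
 * Usage as seen throughout this file: a read passes MII_READ as the value
 * (e.g. mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) returns the register
 * contents, or -1 on timeout/error), while a write passes the value to
 * store and returns 0 on success, nonzero on failure.
 */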
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	int retval;

	writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);

	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	}

	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	}
	writel(reg, base + NvRegMIIControl);

	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
			NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
				dev->name, value, miireg, addr);
		retval = 0;
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
				dev->name, miireg, addr);
		retval = -1;
	} else {
		retval = readl(base + NvRegMIIData);
		dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
				dev->name, miireg, addr, retval);
	}

	return retval;
}
static int phy_reset(struct net_device *dev, u32 bmcr_setup)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 miicontrol;
	unsigned int tries = 0;

	miicontrol = BMCR_RESET | bmcr_setup;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
		return -1;
	}

	/* wait for 500ms */
	msleep(500);

	/* must wait till reset is deasserted */
	while (miicontrol & BMCR_RESET) {
		msleep(10);
		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		/* FIXME: 100 tries seem excessive */
		if (tries++ > 100)
			return -1;
	}
	return 0;
}
static int phy_init(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000, reg;

	/* phy errata for E3016 phy */
	if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
		reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		reg &= ~PHY_MARVELL_E3016_INITMASK;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
			printk(KERN_INFO "%s: phy write to errata reg failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_REALTEK) {
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}

	/* set advertise register */
	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
		printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* get phy interface type */
	phyinterface = readl(base + NvRegPhyInterface);

	/* see if gigabit phy */
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	if (mii_status & PHY_GIGABIT) {
		np->gigabit = PHY_GIGABIT;
		mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
		mii_control_1000 &= ~ADVERTISE_1000HALF;
		if (phyinterface & PHY_RGMII)
			mii_control_1000 |= ADVERTISE_1000FULL;
		else
			mii_control_1000 &= ~ADVERTISE_1000FULL;

		if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else
		np->gigabit = 0;

	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= BMCR_ANENABLE;

	/* reset the phy
	 * (certain phys need bmcr to be setup with reset)
	 */
	if (phy_reset(dev, mii_control)) {
		printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* phy vendor specific configuration */
	if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII)) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
		phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
		phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		phy_reserved |= PHY_CICADA_INIT5;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_CICADA) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
		phy_reserved |= PHY_CICADA_INIT6;
		if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_VITESSE) {
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
		phy_reserved |= PHY_VITESSE_INIT3;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
		phy_reserved |= PHY_VITESSE_INIT3;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
		phy_reserved |= PHY_VITESSE_INIT8;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_REALTEK) {
		/* reset could have cleared these out, set them back */
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}

	/* some phys clear out pause advertisement on reset, set it back */
	mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);

	/* restart auto negotiation */
	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
		return PHY_ERROR;
	}

	return 0;
}
static void nv_start_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
	/* Already running? Stop it. */
	if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
		rx_ctrl &= ~NVREG_RCVCTL_START;
		writel(rx_ctrl, base + NvRegReceiverControl);
		pci_push(base);
	}
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);
	rx_ctrl |= NVREG_RCVCTL_START;
	if (np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
		dev->name, np->duplex, np->linkspeed);
	pci_push(base);
}
static void nv_stop_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
	if (!np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_START;
	else
		rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
		  NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
		  KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");

	udelay(NV_RXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(0, base + NvRegLinkSpeed);
}
static void nv_start_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
	tx_ctrl |= NVREG_XMITCTL_START;
	if (np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	pci_push(base);
}
static void nv_stop_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
	if (!np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_START;
	else
		tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
		  NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
		  KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");

	udelay(NV_TXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
		       base + NvRegTransmitPoll);
}
static void nv_txrx_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	udelay(NV_TXRX_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}
static void nv_mac_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(0, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}
static void nv_get_hw_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	np->estats.tx_bytes += readl(base + NvRegTxCnt);
	np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
	np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
	np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
	np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
	np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
	np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
	np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
	np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
	np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
	np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
	np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
	np->estats.rx_runt += readl(base + NvRegRxRunt);
	np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
	np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
	np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
	np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
	np->estats.rx_length_error += readl(base + NvRegRxLenErr);
	np->estats.rx_unicast += readl(base + NvRegRxUnicast);
	np->estats.rx_multicast += readl(base + NvRegRxMulticast);
	np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
	np->estats.rx_packets =
		np->estats.rx_unicast +
		np->estats.rx_multicast +
		np->estats.rx_broadcast;
	np->estats.rx_errors_total =
		np->estats.rx_crc_errors +
		np->estats.rx_over_errors +
		np->estats.rx_frame_error +
		(np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
		np->estats.rx_late_collision +
		np->estats.rx_runt +
		np->estats.rx_frame_too_long;
	np->estats.tx_errors_total =
		np->estats.tx_late_collision +
		np->estats.tx_fifo_errors +
		np->estats.tx_carrier_errors +
		np->estats.tx_excess_deferral +
		np->estats.tx_retry_error;

	if (np->driver_data & DEV_HAS_STATISTICS_V2) {
		np->estats.tx_deferral += readl(base + NvRegTxDef);
		np->estats.tx_packets += readl(base + NvRegTxFrame);
		np->estats.rx_bytes += readl(base + NvRegRxCnt);
		np->estats.tx_pause += readl(base + NvRegTxPause);
		np->estats.rx_pause += readl(base + NvRegRxPause);
		np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
	}
}
/*
 * nv_get_stats: dev->get_stats function
 * Get latest stats value from the nic.
 * Called with read_lock(&dev_base_lock) held for read -
 * only synchronized against unregister_netdevice.
 */
static struct net_device_stats *nv_get_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	/* If the nic supports hw counters then retrieve latest values */
	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) {
		nv_get_hw_stats(dev);

		/* copy to net_device stats */
		np->stats.tx_bytes = np->estats.tx_bytes;
		np->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
		np->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
		np->stats.rx_crc_errors = np->estats.rx_crc_errors;
		np->stats.rx_over_errors = np->estats.rx_over_errors;
		np->stats.rx_errors = np->estats.rx_errors_total;
		np->stats.tx_errors = np->estats.tx_errors_total;
	}
	return &np->stats;
}
/*
 * nv_alloc_rx: fill rx ring entries.
 * Return 1 if the allocations for the skbs failed and the
 * rx engine is without Available descriptors
 */
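/*
 * Note on the loop below: less_rx is the slot just before get_rx (wrapping
 * at first_rx), so the refill stops one descriptor short of the reader and
 * put_rx can never catch up with get_rx on a completely full ring.
 */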
static int nv_alloc_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc *less_rx;

	less_rx = np->get_rx.orig;
	if (less_rx-- == np->first_rx.orig)
		less_rx = np->last_rx.orig;

	while (np->put_rx.orig != less_rx) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
							     skb->data,
							     skb_tailroom(skb),
							     PCI_DMA_FROMDEVICE);
			np->put_rx_ctx->dma_len = skb_tailroom(skb);
			np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
			wmb();
			np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
			if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
				np->put_rx.orig = np->first_rx.orig;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
		} else {
			return 1;
		}
	}
	return 0;
}
static int nv_alloc_rx_optimized(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc_ex *less_rx;

	less_rx = np->get_rx.ex;
	if (less_rx-- == np->first_rx.ex)
		less_rx = np->last_rx.ex;

	while (np->put_rx.ex != less_rx) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
							     skb->data,
							     skb_tailroom(skb),
							     PCI_DMA_FROMDEVICE);
			np->put_rx_ctx->dma_len = skb_tailroom(skb);
			np->put_rx.ex->bufhigh = cpu_to_le64(np->put_rx_ctx->dma) >> 32;
			np->put_rx.ex->buflow = cpu_to_le64(np->put_rx_ctx->dma) & 0x0FFFFFFFF;
			wmb();
			np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
			if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
				np->put_rx.ex = np->first_rx.ex;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
		} else {
			return 1;
		}
	}
	return 0;
}
/* If rx bufs are exhausted called after 50ms to attempt to refresh */
#ifdef CONFIG_FORCEDETH_NAPI
static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;

	/* Just reschedule NAPI rx processing */
	netif_rx_schedule(dev);
}
#else
static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	int retcode;

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	}
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		retcode = nv_alloc_rx(dev);
	else
		retcode = nv_alloc_rx_optimized(dev);
	if (retcode) {
		spin_lock_irq(&np->lock);
		if (!np->in_shutdown)
			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		spin_unlock_irq(&np->lock);
	}
	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
	}
}
#endif
static void nv_init_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
	else
		np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
	np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
	np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];

	for (i = 0; i < np->rx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->rx_ring.orig[i].flaglen = 0;
			np->rx_ring.orig[i].buf = 0;
		} else {
			np->rx_ring.ex[i].flaglen = 0;
			np->rx_ring.ex[i].txvlan = 0;
			np->rx_ring.ex[i].bufhigh = 0;
			np->rx_ring.ex[i].buflow = 0;
		}
		np->rx_skb[i].skb = NULL;
		np->rx_skb[i].dma = 0;
	}
}
static void nv_init_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->get_tx = np->put_tx = np->first_tx = np->tx_ring;
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
	else
		np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
	np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
	np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];

	for (i = 0; i < np->tx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->tx_ring.orig[i].flaglen = 0;
			np->tx_ring.orig[i].buf = 0;
		} else {
			np->tx_ring.ex[i].flaglen = 0;
			np->tx_ring.ex[i].txvlan = 0;
			np->tx_ring.ex[i].bufhigh = 0;
			np->tx_ring.ex[i].buflow = 0;
		}
		np->tx_skb[i].skb = NULL;
		np->tx_skb[i].dma = 0;
	}
}
static int nv_init_ring(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	nv_init_tx(dev);
	nv_init_rx(dev);
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		return nv_alloc_rx(dev);
	else
		return nv_alloc_rx_optimized(dev);
}
static int nv_release_txskb(struct net_device *dev, struct nv_skb_map *tx_skb)
{
	struct fe_priv *np = netdev_priv(dev);

	if (tx_skb->dma) {
		pci_unmap_page(np->pci_dev, tx_skb->dma,
			       tx_skb->dma_len,
			       PCI_DMA_TODEVICE);
		tx_skb->dma = 0;
	}
	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
		return 1;
	} else {
		return 0;
	}
}
static void nv_drain_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	unsigned int i;

	for (i = 0; i < np->tx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->tx_ring.orig[i].flaglen = 0;
			np->tx_ring.orig[i].buf = 0;
		} else {
			np->tx_ring.ex[i].flaglen = 0;
			np->tx_ring.ex[i].txvlan = 0;
			np->tx_ring.ex[i].bufhigh = 0;
			np->tx_ring.ex[i].buflow = 0;
		}
		if (nv_release_txskb(dev, &np->tx_skb[i]))
			np->stats.tx_dropped++;
	}
}
static void nv_drain_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	for (i = 0; i < np->rx_ring_size; i++) {
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			np->rx_ring.orig[i].flaglen = 0;
			np->rx_ring.orig[i].buf = 0;
		} else {
			np->rx_ring.ex[i].flaglen = 0;
			np->rx_ring.ex[i].txvlan = 0;
			np->rx_ring.ex[i].bufhigh = 0;
			np->rx_ring.ex[i].buflow = 0;
		}
		wmb();
		if (np->rx_skb[i].skb) {
			pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
					 (skb_end_pointer(np->rx_skb[i].skb) -
					  np->rx_skb[i].skb->data),
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skb[i].skb);
			np->rx_skb[i].skb = NULL;
		}
	}
}
static void drain_ring(struct net_device *dev)
{
	nv_drain_tx(dev);
	nv_drain_rx(dev);
}
static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
{
	return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
}
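/*
 * Worked example: with tx_ring_size 256 and put_tx_ctx 10 slots ahead of
 * get_tx_ctx, (256 + 10) % 256 = 10 slots are in flight, so 246 are empty.
 * The "+ tx_ring_size" term only keeps the modulus positive when put has
 * wrapped around below get.
 */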
/*
 * nv_start_xmit: dev->hard_start_xmit function
 * Called with netif_tx_lock held.
 */
static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 tx_flags = 0;
	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
	unsigned int fragments = skb_shinfo(skb)->nr_frags;
	unsigned int i;
	u32 offset = 0;
	u32 bcnt;
	u32 size = skb->len-skb->data_len;
	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	u32 empty_slots;
	struct ring_desc *put_tx;
	struct ring_desc *start_tx;
	struct ring_desc *prev_tx;
	struct nv_skb_map *prev_tx_ctx;

	/* add fragments to entries count */
	for (i = 0; i < fragments; i++) {
		entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	}
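	/*
	 * Both computations above are just ceiling division: a descriptor can
	 * carry at most NV_TX2_TSO_MAX_SIZE (1<<14 = 16384) bytes, so a buffer
	 * of size bytes needs size/16384 descriptors, plus one for any
	 * remainder.
	 */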
1798 empty_slots
= nv_get_empty_tx_slots(np
);
1799 if (unlikely(empty_slots
<= entries
)) {
1800 spin_lock_irq(&np
->lock
);
1801 netif_stop_queue(dev
);
1803 spin_unlock_irq(&np
->lock
);
1804 return NETDEV_TX_BUSY
;
1807 start_tx
= put_tx
= np
->put_tx
.orig
;
1809 /* setup the header buffer */
1812 prev_tx_ctx
= np
->put_tx_ctx
;
1813 bcnt
= (size
> NV_TX2_TSO_MAX_SIZE
) ? NV_TX2_TSO_MAX_SIZE
: size
;
1814 np
->put_tx_ctx
->dma
= pci_map_single(np
->pci_dev
, skb
->data
+ offset
, bcnt
,
1816 np
->put_tx_ctx
->dma_len
= bcnt
;
1817 put_tx
->buf
= cpu_to_le32(np
->put_tx_ctx
->dma
);
1818 put_tx
->flaglen
= cpu_to_le32((bcnt
-1) | tx_flags
);
1820 tx_flags
= np
->tx_flags
;
1823 if (unlikely(put_tx
++ == np
->last_tx
.orig
))
1824 put_tx
= np
->first_tx
.orig
;
1825 if (unlikely(np
->put_tx_ctx
++ == np
->last_tx_ctx
))
1826 np
->put_tx_ctx
= np
->first_tx_ctx
;
1829 /* setup the fragments */
1830 for (i
= 0; i
< fragments
; i
++) {
1831 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
1832 u32 size
= frag
->size
;
1837 prev_tx_ctx
= np
->put_tx_ctx
;
1838 bcnt
= (size
> NV_TX2_TSO_MAX_SIZE
) ? NV_TX2_TSO_MAX_SIZE
: size
;
1839 np
->put_tx_ctx
->dma
= pci_map_page(np
->pci_dev
, frag
->page
, frag
->page_offset
+offset
, bcnt
,
1841 np
->put_tx_ctx
->dma_len
= bcnt
;
1842 put_tx
->buf
= cpu_to_le32(np
->put_tx_ctx
->dma
);
1843 put_tx
->flaglen
= cpu_to_le32((bcnt
-1) | tx_flags
);
1847 if (unlikely(put_tx
++ == np
->last_tx
.orig
))
1848 put_tx
= np
->first_tx
.orig
;
1849 if (unlikely(np
->put_tx_ctx
++ == np
->last_tx_ctx
))
1850 np
->put_tx_ctx
= np
->first_tx_ctx
;
1854 /* set last fragment flag */
1855 prev_tx
->flaglen
|= cpu_to_le32(tx_flags_extra
);
1857 /* save skb in this slot's context area */
1858 prev_tx_ctx
->skb
= skb
;
1860 if (skb_is_gso(skb
))
1861 tx_flags_extra
= NV_TX2_TSO
| (skb_shinfo(skb
)->gso_size
<< NV_TX2_TSO_SHIFT
);
1863 tx_flags_extra
= skb
->ip_summed
== CHECKSUM_PARTIAL
?
1864 NV_TX2_CHECKSUM_L3
| NV_TX2_CHECKSUM_L4
: 0;
1866 spin_lock_irq(&np
->lock
);
1869 start_tx
->flaglen
|= cpu_to_le32(tx_flags
| tx_flags_extra
);
1870 np
->put_tx
.orig
= put_tx
;
1872 spin_unlock_irq(&np
->lock
);
1874 dprintk(KERN_DEBUG
"%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n",
1875 dev
->name
, entries
, tx_flags_extra
);
1878 for (j
=0; j
<64; j
++) {
1880 dprintk("\n%03x:", j
);
1881 dprintk(" %02x", ((unsigned char*)skb
->data
)[j
]);
1886 dev
->trans_start
= jiffies
;
1887 writel(NVREG_TXRXCTL_KICK
|np
->txrxctl_bits
, get_hwbase(dev
) + NvRegTxRxControl
);
1888 return NETDEV_TX_OK
;
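/*
 * nv_start_xmit_optimized is the DESC_VER_3 variant of nv_start_xmit:
 * the larger ring_desc_ex descriptors carry a 64 bit buffer address
 * split into bufhigh/buflow plus a vlan tag word, so the queueing logic
 * below mirrors nv_start_xmit but fills the extended fields.
 */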
static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 tx_flags = 0;
	u32 tx_flags_extra;
	unsigned int fragments = skb_shinfo(skb)->nr_frags;
	unsigned int i;
	u32 offset = 0;
	u32 bcnt;
	u32 size = skb->len - skb->data_len;
	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	u32 empty_slots;
	struct ring_desc_ex *put_tx;
	struct ring_desc_ex *start_tx;
	struct ring_desc_ex *prev_tx;
	struct nv_skb_map *prev_tx_ctx;

	/* add fragments to entries count */
	for (i = 0; i < fragments; i++) {
		entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
	}

	empty_slots = nv_get_empty_tx_slots(np);
	if (unlikely(empty_slots <= entries)) {
		spin_lock_irq(&np->lock);
		netif_stop_queue(dev);
		np->tx_stop = 1;
		spin_unlock_irq(&np->lock);
		return NETDEV_TX_BUSY;
	}

	start_tx = put_tx = np->put_tx.ex;

	/* setup the header buffer */
	do {
		prev_tx = put_tx;
		prev_tx_ctx = np->put_tx_ctx;
		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
		np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
						     PCI_DMA_TODEVICE);
		np->put_tx_ctx->dma_len = bcnt;
		put_tx->bufhigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32;
		put_tx->buflow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF;
		put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

		tx_flags = NV_TX2_VALID;
		offset += bcnt;
		size -= bcnt;
		if (unlikely(put_tx++ == np->last_tx.ex))
			put_tx = np->first_tx.ex;
		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
			np->put_tx_ctx = np->first_tx_ctx;
	} while (size);

	/* setup the fragments */
	for (i = 0; i < fragments; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 size = frag->size;
		offset = 0;

		do {
			prev_tx = put_tx;
			prev_tx_ctx = np->put_tx_ctx;
			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
			np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
							   PCI_DMA_TODEVICE);
			np->put_tx_ctx->dma_len = bcnt;
			put_tx->bufhigh = cpu_to_le64(np->put_tx_ctx->dma) >> 32;
			put_tx->buflow = cpu_to_le64(np->put_tx_ctx->dma) & 0x0FFFFFFFF;
			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);

			offset += bcnt;
			size -= bcnt;
			if (unlikely(put_tx++ == np->last_tx.ex))
				put_tx = np->first_tx.ex;
			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
				np->put_tx_ctx = np->first_tx_ctx;
		} while (size);
	}

	/* set last fragment flag */
	prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);

	/* save skb in this slot's context area */
	prev_tx_ctx->skb = skb;

	if (skb_is_gso(skb))
		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
	else
		tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
			 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;

	/* vlan tag */
	if (likely(!np->vlangrp)) {
		start_tx->txvlan = 0;
	} else {
		if (vlan_tx_tag_present(skb))
			start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT | vlan_tx_tag_get(skb));
		else
			start_tx->txvlan = 0;
	}

	spin_lock_irq(&np->lock);

	/* set tx flags */
	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
	np->put_tx.ex = put_tx;

	spin_unlock_irq(&np->lock);

	dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. tx_flags_extra: %x\n",
		dev->name, entries, tx_flags_extra);
	{
		int j;
		for (j = 0; j < 64; j++) {
			if ((j%16) == 0)
				dprintk("\n%03x:", j);
			dprintk(" %02x", ((unsigned char*)skb->data)[j]);
		}
		dprintk("\n");
	}

	dev->trans_start = jiffies;
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	return NETDEV_TX_OK;
}
/*
 * nv_tx_done: check for completed packets, release the skbs.
 *
 * Caller must own np->lock.
 */
static void nv_tx_done(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	struct ring_desc *orig_get_tx = np->get_tx.orig;

	while ((np->get_tx.orig != np->put_tx.orig) &&
	       !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID)) {

		dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n",
			dev->name, flags);

		pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
			       np->get_tx_ctx->dma_len,
			       PCI_DMA_TODEVICE);
		np->get_tx_ctx->dma = 0;

		if (np->desc_ver == DESC_VER_1) {
			if (flags & NV_TX_LASTPACKET) {
				if (flags & NV_TX_ERROR) {
					if (flags & NV_TX_UNDERFLOW)
						np->stats.tx_fifo_errors++;
					if (flags & NV_TX_CARRIERLOST)
						np->stats.tx_carrier_errors++;
					np->stats.tx_errors++;
				} else {
					np->stats.tx_packets++;
					np->stats.tx_bytes += np->get_tx_ctx->skb->len;
				}
				dev_kfree_skb_any(np->get_tx_ctx->skb);
				np->get_tx_ctx->skb = NULL;
			}
		} else {
			if (flags & NV_TX2_LASTPACKET) {
				if (flags & NV_TX2_ERROR) {
					if (flags & NV_TX2_UNDERFLOW)
						np->stats.tx_fifo_errors++;
					if (flags & NV_TX2_CARRIERLOST)
						np->stats.tx_carrier_errors++;
					np->stats.tx_errors++;
				} else {
					np->stats.tx_packets++;
					np->stats.tx_bytes += np->get_tx_ctx->skb->len;
				}
				dev_kfree_skb_any(np->get_tx_ctx->skb);
				np->get_tx_ctx->skb = NULL;
			}
		}
		if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
			np->get_tx.orig = np->first_tx.orig;
		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
			np->get_tx_ctx = np->first_tx_ctx;
	}
	if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
		np->tx_stop = 0;
		netif_wake_queue(dev);
	}
}
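/*
 * nv_tx_done_optimized: DESC_VER_3 completion path.  The nic clears
 * NV_TX_VALID in flaglen once a descriptor has been sent, so the loop
 * stops at the first descriptor still owned by the hardware; the extra
 * limit argument bounds the work done per call from the MSI-X handlers.
 */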
static void nv_tx_done_optimized(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	struct ring_desc_ex *orig_get_tx = np->get_tx.ex;

	while ((np->get_tx.ex != np->put_tx.ex) &&
	       !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX_VALID) &&
	       (limit-- > 0)) {

		dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n",
			dev->name, flags);

		pci_unmap_page(np->pci_dev, np->get_tx_ctx->dma,
			       np->get_tx_ctx->dma_len,
			       PCI_DMA_TODEVICE);
		np->get_tx_ctx->dma = 0;

		if (flags & NV_TX2_LASTPACKET) {
			if (!(flags & NV_TX2_ERROR))
				np->stats.tx_packets++;
			dev_kfree_skb_any(np->get_tx_ctx->skb);
			np->get_tx_ctx->skb = NULL;
		}
		if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
			np->get_tx.ex = np->first_tx.ex;
		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
			np->get_tx_ctx = np->first_tx_ctx;
	}
	if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
		np->tx_stop = 0;
		netif_wake_queue(dev);
	}
}
/*
 * nv_tx_timeout: dev->tx_timeout function
 * Called with netif_tx_lock held.
 */
static void nv_tx_timeout(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 status;

	if (np->msi_flags & NV_MSI_X_ENABLED)
		status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
	else
		status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;

	printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status);

	{
		int i;

		printk(KERN_INFO "%s: Ring at %lx\n",
		       dev->name, (unsigned long)np->ring_addr);
		printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
		for (i = 0; i <= np->register_size; i += 32) {
			printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
			       i,
			       readl(base + i + 0), readl(base + i + 4),
			       readl(base + i + 8), readl(base + i + 12),
			       readl(base + i + 16), readl(base + i + 20),
			       readl(base + i + 24), readl(base + i + 28));
		}
		printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
		for (i = 0; i < np->tx_ring_size; i += 4) {
			if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
				printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
				       i,
				       le32_to_cpu(np->tx_ring.orig[i].buf),
				       le32_to_cpu(np->tx_ring.orig[i].flaglen),
				       le32_to_cpu(np->tx_ring.orig[i+1].buf),
				       le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
				       le32_to_cpu(np->tx_ring.orig[i+2].buf),
				       le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
				       le32_to_cpu(np->tx_ring.orig[i+3].buf),
				       le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
			} else {
				printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
				       i,
				       le32_to_cpu(np->tx_ring.ex[i].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i].buflow),
				       le32_to_cpu(np->tx_ring.ex[i].flaglen),
				       le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i+1].buflow),
				       le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
				       le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i+2].buflow),
				       le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
				       le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
				       le32_to_cpu(np->tx_ring.ex[i+3].buflow),
				       le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
			}
		}
	}

	spin_lock_irq(&np->lock);

	/* 1) stop tx engine */
	nv_stop_tx(dev);

	/* 2) check that the packets were not sent already: */
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		nv_tx_done(dev);
	else
		nv_tx_done_optimized(dev, np->tx_ring_size);

	/* 3) if there are dead entries: clear everything */
	if (np->get_tx_ctx != np->put_tx_ctx) {
		printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
		nv_drain_tx(dev);
		nv_init_tx(dev);
		setup_hw_rings(dev, NV_SETUP_TX_RING);
	}

	netif_wake_queue(dev);

	/* 4) restart tx engine */
	nv_start_tx(dev);
	spin_unlock_irq(&np->lock);
}
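/*
 * Example of the length/EtherType distinction used by nv_getlen(): a
 * proto field value above ETH_DATA_LEN (1500) is an EtherType (e.g.
 * 0x0800 for IPv4), so the real payload length is unknown and the frame
 * is accepted as-is; a value of 1500 or less is an 802.3 length that can
 * be checked against the byte count actually seen on the wire.
 */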
/*
 * Called when the nic notices a mismatch between the actual data len on the
 * wire and the len indicated in the 802 header
 */
static int nv_getlen(struct net_device *dev, void *packet, int datalen)
{
	int hdrlen;	/* length of the 802 header */
	int protolen;	/* length as stored in the proto field */

	/* 1) calculate len according to header */
	if (((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
		protolen = ntohs(((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto);
		hdrlen = VLAN_HLEN;
	} else {
		protolen = ntohs(((struct ethhdr *)packet)->h_proto);
		hdrlen = ETH_HLEN;
	}
	dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
		dev->name, datalen, protolen, hdrlen);
	if (protolen > ETH_DATA_LEN)
		return datalen; /* Value in proto field not a len, no checks possible */

	protolen += hdrlen;
	/* consistency checks: */
	if (datalen > ETH_ZLEN) {
		if (datalen >= protolen) {
			/* more data on wire than in 802 header, trim off
			 * additional data.
			 */
			dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
				dev->name, protolen);
			return protolen;
		} else {
			/* less data on wire than mentioned in header.
			 * Discard the packet.
			 */
			dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n",
				dev->name);
			return -1;
		}
	} else {
		/* short packet. Accept only if 802 values are also short */
		if (protolen > ETH_ZLEN) {
			dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n",
				dev->name);
			return -1;
		}
		dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
			dev->name, datalen);
		return datalen;
	}
}
static int nv_rx_process(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	u32 rx_processed_cnt = 0;
	struct sk_buff *skb;
	int len;

	while ((np->get_rx.orig != np->put_rx.orig) &&
	      !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
		(rx_processed_cnt++ < limit)) {

		dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n",
			dev->name, flags);

		/*
		 * the packet is for us - immediately tear down the pci mapping.
		 * TODO: check if a prefetch of the first cacheline improves
		 * the performance.
		 */
		pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
				 np->get_rx_ctx->dma_len,
				 PCI_DMA_FROMDEVICE);
		skb = np->get_rx_ctx->skb;
		np->get_rx_ctx->skb = NULL;

		{
			int j;
			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).", flags);
			for (j = 0; j < 64; j++) {
				if ((j%16) == 0)
					dprintk("\n%03x:", j);
				dprintk(" %02x", ((unsigned char*)skb->data)[j]);
			}
			dprintk("\n");
		}
		/* look at what we actually got: */
		if (np->desc_ver == DESC_VER_1) {
			if (likely(flags & NV_RX_DESCRIPTORVALID)) {
				len = flags & LEN_MASK_V1;
				if (unlikely(flags & NV_RX_ERROR)) {
					if (flags & NV_RX_ERROR4) {
						len = nv_getlen(dev, skb->data, len);
						if (len < 0) {
							np->stats.rx_errors++;
							dev_kfree_skb(skb);
							goto next_pkt;
						}
					}
					/* framing errors are soft errors */
					else if (flags & NV_RX_FRAMINGERR) {
						if (flags & NV_RX_SUBSTRACT1) {
							len--;
						}
					}
					/* the rest are hard errors */
					else {
						if (flags & NV_RX_MISSEDFRAME)
							np->stats.rx_missed_errors++;
						if (flags & NV_RX_CRCERR)
							np->stats.rx_crc_errors++;
						if (flags & NV_RX_OVERFLOW)
							np->stats.rx_over_errors++;
						np->stats.rx_errors++;
						dev_kfree_skb(skb);
						goto next_pkt;
					}
				}
			} else {
				dev_kfree_skb(skb);
				goto next_pkt;
			}
		} else {
			if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
				len = flags & LEN_MASK_V2;
				if (unlikely(flags & NV_RX2_ERROR)) {
					if (flags & NV_RX2_ERROR4) {
						len = nv_getlen(dev, skb->data, len);
						if (len < 0) {
							np->stats.rx_errors++;
							dev_kfree_skb(skb);
							goto next_pkt;
						}
					}
					/* framing errors are soft errors */
					else if (flags & NV_RX2_FRAMINGERR) {
						if (flags & NV_RX2_SUBSTRACT1) {
							len--;
						}
					}
					/* the rest are hard errors */
					else {
						if (flags & NV_RX2_CRCERR)
							np->stats.rx_crc_errors++;
						if (flags & NV_RX2_OVERFLOW)
							np->stats.rx_over_errors++;
						np->stats.rx_errors++;
						dev_kfree_skb(skb);
						goto next_pkt;
					}
				}
				if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2) { /* ip and tcp */
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				} else {
					if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 ||
					    (flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) {
						skb->ip_summed = CHECKSUM_UNNECESSARY;
					}
				}
			} else {
				dev_kfree_skb(skb);
				goto next_pkt;
			}
		}
		/* got a valid packet - forward it to the network core */
		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, dev);
		dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
			dev->name, len, skb->protocol);
#ifdef CONFIG_FORCEDETH_NAPI
		netif_receive_skb(skb);
#else
		netif_rx(skb);
#endif
		dev->last_rx = jiffies;
		np->stats.rx_packets++;
		np->stats.rx_bytes += len;
next_pkt:
		if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
			np->get_rx.orig = np->first_rx.orig;
		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
			np->get_rx_ctx = np->first_rx_ctx;
	}

	return rx_processed_cnt;
}
static int nv_rx_process_optimized(struct net_device *dev, int limit)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 flags;
	u32 vlanflags = 0;
	u32 rx_processed_cnt = 0;
	struct sk_buff *skb;
	int len;

	while ((np->get_rx.ex != np->put_rx.ex) &&
	      !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
	      (rx_processed_cnt++ < limit)) {

		dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: flags 0x%x.\n",
			dev->name, flags);

		/*
		 * the packet is for us - immediately tear down the pci mapping.
		 * TODO: check if a prefetch of the first cacheline improves
		 * the performance.
		 */
		pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
				 np->get_rx_ctx->dma_len,
				 PCI_DMA_FROMDEVICE);
		skb = np->get_rx_ctx->skb;
		np->get_rx_ctx->skb = NULL;

		{
			int j;
			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).", flags);
			for (j = 0; j < 64; j++) {
				if ((j%16) == 0)
					dprintk("\n%03x:", j);
				dprintk(" %02x", ((unsigned char*)skb->data)[j]);
			}
			dprintk("\n");
		}
		/* look at what we actually got: */
		if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
			len = flags & LEN_MASK_V2;
			if (unlikely(flags & NV_RX2_ERROR)) {
				if (flags & NV_RX2_ERROR4) {
					len = nv_getlen(dev, skb->data, len);
					if (len < 0) {
						dev_kfree_skb(skb);
						goto next_pkt;
					}
				}
				/* framing errors are soft errors */
				else if (flags & NV_RX2_FRAMINGERR) {
					if (flags & NV_RX2_SUBSTRACT1) {
						len--;
					}
				}
				/* the rest are hard errors */
				else {
					dev_kfree_skb(skb);
					goto next_pkt;
				}
			}

			if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2) { /* ip and tcp */
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 ||
				    (flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) {
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				}
			}

			/* got a valid packet - forward it to the network core */
			skb_put(skb, len);
			skb->protocol = eth_type_trans(skb, dev);
			prefetch(skb->data);

			dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: %d bytes, proto %d accepted.\n",
				dev->name, len, skb->protocol);

			if (likely(!np->vlangrp)) {
#ifdef CONFIG_FORCEDETH_NAPI
				netif_receive_skb(skb);
#else
				netif_rx(skb);
#endif
			} else {
				vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
				if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
#ifdef CONFIG_FORCEDETH_NAPI
					vlan_hwaccel_receive_skb(skb, np->vlangrp,
								 vlanflags & NV_RX3_VLAN_TAG_MASK);
#else
					vlan_hwaccel_rx(skb, np->vlangrp,
							vlanflags & NV_RX3_VLAN_TAG_MASK);
#endif
				} else {
#ifdef CONFIG_FORCEDETH_NAPI
					netif_receive_skb(skb);
#else
					netif_rx(skb);
#endif
				}
			}

			dev->last_rx = jiffies;
			np->stats.rx_packets++;
			np->stats.rx_bytes += len;
		} else {
			dev_kfree_skb(skb);
		}
next_pkt:
		if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
			np->get_rx.ex = np->first_rx.ex;
		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
			np->get_rx_ctx = np->first_rx_ctx;
	}

	return rx_processed_cnt;
}
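/*
 * set_bufsize: rx buffers must hold an MTU-sized payload plus the link
 * level headers (NV_RX_HEADERS).  MTUs at or below ETH_DATA_LEN all use
 * the same standard buffer size, so switching between small MTUs does
 * not require reallocating the ring (see nv_change_mtu below).
 */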
static void set_bufsize(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	if (dev->mtu <= ETH_DATA_LEN)
		np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
	else
		np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
}
/*
 * nv_change_mtu: dev->change_mtu function
 * Called with dev_base_lock held for read.
 */
static int nv_change_mtu(struct net_device *dev, int new_mtu)
{
	struct fe_priv *np = netdev_priv(dev);
	int old_mtu;

	if (new_mtu < 64 || new_mtu > np->pkt_limit)
		return -EINVAL;

	old_mtu = dev->mtu;
	dev->mtu = new_mtu;

	/* return early if the buffer sizes will not change */
	if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
		return 0;
	if (old_mtu == new_mtu)
		return 0;

	/* synchronized against open : rtnl_lock() held by caller */
	if (netif_running(dev)) {
		u8 __iomem *base = get_hwbase(dev);
		/*
		 * It seems that the nic preloads valid ring entries into an
		 * internal buffer. The procedure for flushing everything is
		 * guessed, there is probably a simpler approach.
		 * Changing the MTU is a rare event, it shouldn't matter.
		 */
		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rx(dev);
		nv_stop_tx(dev);
		nv_txrx_reset(dev);
		/* drain rx queue */
		nv_drain_rx(dev);
		nv_drain_tx(dev);
		/* reinit driver view of the rx queue */
		set_bufsize(dev);
		if (nv_init_ring(dev)) {
			if (!np->in_shutdown)
				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		}
		/* reinit nic view of the rx queue */
		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
		writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
			base + NvRegRingSizes);
		pci_push(base);
		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
		pci_push(base);

		/* restart rx engine */
		nv_start_rx(dev);
		nv_start_tx(dev);
		spin_unlock(&np->lock);
		netif_tx_unlock_bh(dev);
		nv_enable_irq(dev);
	}
	return 0;
}
static void nv_copy_mac_to_hw(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 mac[2];

	mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
		 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
	mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);

	writel(mac[0], base + NvRegMacAddrA);
	writel(mac[1], base + NvRegMacAddrB);
}
/*
 * nv_set_mac_address: dev->set_mac_address function
 * Called with rtnl_lock() held.
 */
static int nv_set_mac_address(struct net_device *dev, void *addr)
{
	struct fe_priv *np = netdev_priv(dev);
	struct sockaddr *macaddr = (struct sockaddr *)addr;

	if (!is_valid_ether_addr(macaddr->sa_data))
		return -EADDRNOTAVAIL;

	/* synchronized against open : rtnl_lock() held by caller */
	memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);

	if (netif_running(dev)) {
		netif_tx_lock_bh(dev);
		spin_lock_irq(&np->lock);

		/* stop rx engine */
		nv_stop_rx(dev);

		/* set mac address */
		nv_copy_mac_to_hw(dev);

		/* restart rx engine */
		nv_start_rx(dev);
		spin_unlock_irq(&np->lock);
		netif_tx_unlock_bh(dev);
	} else {
		nv_copy_mac_to_hw(dev);
	}
	return 0;
}
/*
 * nv_set_multicast: dev->set_multicast function
 * Called with netif_tx_lock held.
 */
static void nv_set_multicast(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 addr[2];
	u32 mask[2];
	u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;

	memset(addr, 0, sizeof(addr));
	memset(mask, 0, sizeof(mask));

	if (dev->flags & IFF_PROMISC) {
		pff |= NVREG_PFF_PROMISC;
	} else {
		pff |= NVREG_PFF_MYADDR;

		if (dev->flags & IFF_ALLMULTI || dev->mc_list) {
			u32 alwaysOff[2];
			u32 alwaysOn[2];

			alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
			if (dev->flags & IFF_ALLMULTI) {
				alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
			} else {
				struct dev_mc_list *walk;

				walk = dev->mc_list;
				while (walk != NULL) {
					u32 a, b;
					a = le32_to_cpu(*(u32 *) walk->dmi_addr);
					b = le16_to_cpu(*(u16 *) (&walk->dmi_addr[4]));
					alwaysOn[0] &= a;
					alwaysOff[0] &= ~a;
					alwaysOn[1] &= b;
					alwaysOff[1] &= ~b;
					walk = walk->next;
				}
			}
			addr[0] = alwaysOn[0];
			addr[1] = alwaysOn[1];
			mask[0] = alwaysOn[0] | alwaysOff[0];
			mask[1] = alwaysOn[1] | alwaysOff[1];
		}
	}
	addr[0] |= NVREG_MCASTADDRA_FORCE;
	pff |= NVREG_PFF_ALWAYS;
	spin_lock_irq(&np->lock);
	nv_stop_rx(dev);
	writel(addr[0], base + NvRegMulticastAddrA);
	writel(addr[1], base + NvRegMulticastAddrB);
	writel(mask[0], base + NvRegMulticastMaskA);
	writel(mask[1], base + NvRegMulticastMaskB);
	writel(pff, base + NvRegPacketFilterFlags);
	dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n",
		dev->name);
	nv_start_rx(dev);
	spin_unlock_irq(&np->lock);
}
static void nv_update_pause(struct net_device *dev, u32 pause_flags)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);

	if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
		u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
		if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
			writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		} else {
			writel(pff, base + NvRegPacketFilterFlags);
		}
	}
	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
		u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
		if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
			writel(NVREG_TX_PAUSEFRAME_ENABLE, base + NvRegTxPauseFrame);
			writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
		} else {
			writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
			writel(regmisc, base + NvRegMisc1);
		}
	}
}
/*
 * nv_update_linkspeed: Setup the MAC according to the link partner
 * @dev: Network device to be configured
 *
 * The function queries the PHY and checks if there is a link partner.
 * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is
 * set to 10 MBit HD.
 *
 * The function returns 0 if there is no link partner and 1 if there is
 * a good link partner.
 */
static int nv_update_linkspeed(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int adv = 0;
	int lpa = 0;
	int adv_lpa, adv_pause, lpa_pause;
	int newls = np->linkspeed;
	int newdup = np->duplex;
	int mii_status;
	int retval = 0;
	u32 control_1000, status_1000, phyreg, pause_flags, txreg;

	/* BMSR_LSTATUS is latched, read it twice:
	 * we want the current value.
	 */
	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);

	if (!(mii_status & BMSR_LSTATUS)) {
		dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n",
			dev->name);
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
		retval = 0;
		goto set_speed;
	}

	if (np->autoneg == 0) {
		dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n",
			dev->name, np->fixed_mode);
		if (np->fixed_mode & LPA_100FULL) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
			newdup = 1;
		} else if (np->fixed_mode & LPA_100HALF) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
			newdup = 0;
		} else if (np->fixed_mode & LPA_10FULL) {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
			newdup = 1;
		} else {
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
			newdup = 0;
		}
		retval = 1;
		goto set_speed;
	}
	/* check auto negotiation is complete */
	if (!(mii_status & BMSR_ANEGCOMPLETE)) {
		/* still in autonegotiation - configure nic for 10 MBit HD and wait. */
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
		retval = 0;
		dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name);
		goto set_speed;
	}

	adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
	dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
		dev->name, adv, lpa);

	retval = 1;
	if (np->gigabit == PHY_GIGABIT) {
		control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
		status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);

		if ((control_1000 & ADVERTISE_1000FULL) &&
		    (status_1000 & LPA_1000FULL)) {
			dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n",
				dev->name);
			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
			newdup = 1;
			goto set_speed;
		}
	}

	/* FIXME: handle parallel detection properly */
	adv_lpa = lpa & adv;
	if (adv_lpa & LPA_100FULL) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
		newdup = 1;
	} else if (adv_lpa & LPA_100HALF) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
		newdup = 0;
	} else if (adv_lpa & LPA_10FULL) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 1;
	} else if (adv_lpa & LPA_10HALF) {
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
	} else {
		dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa);
		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
		newdup = 0;
	}

set_speed:
	if (np->duplex == newdup && np->linkspeed == newls)
		return retval;

	dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n",
		dev->name, np->linkspeed, np->duplex, newls, newdup);

	np->duplex = newdup;
	np->linkspeed = newls;

	if (np->gigabit == PHY_GIGABIT) {
		phyreg = readl(base + NvRegRandomSeed);
		phyreg &= ~(0x3FF00);
		if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10)
			phyreg |= NVREG_RNDSEED_FORCE3;
		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)
			phyreg |= NVREG_RNDSEED_FORCE2;
		else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
			phyreg |= NVREG_RNDSEED_FORCE;
		writel(phyreg, base + NvRegRandomSeed);
	}

	phyreg = readl(base + NvRegPhyInterface);
	phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
	if (np->duplex == 0)
		phyreg |= PHY_HALF;
	if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
		phyreg |= PHY_100;
	else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
		phyreg |= PHY_1000;
	writel(phyreg, base + NvRegPhyInterface);

	if (phyreg & PHY_RGMII) {
		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
			txreg = NVREG_TX_DEFERRAL_RGMII_1000;
		else
			txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
	} else {
		txreg = NVREG_TX_DEFERRAL_DEFAULT;
	}
	writel(txreg, base + NvRegTxDeferral);

	if (np->desc_ver == DESC_VER_1) {
		txreg = NVREG_TX_WM_DESC1_DEFAULT;
	} else {
		if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
			txreg = NVREG_TX_WM_DESC2_3_1000;
		else
			txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
	}
	writel(txreg, base + NvRegTxWatermark);

	writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
		base + NvRegMisc1);
	pci_push(base);
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);

	pause_flags = 0;
	/* setup pause frame */
	if (np->duplex != 0) {
		if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
			adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
			lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

			switch (adv_pause) {
			case ADVERTISE_PAUSE_CAP:
				if (lpa_pause & LPA_PAUSE_CAP) {
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				}
				break;
			case ADVERTISE_PAUSE_ASYM:
				if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM))
					pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				break;
			case ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM:
				if (lpa_pause & LPA_PAUSE_CAP) {
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
				}
				if (lpa_pause == LPA_PAUSE_ASYM)
					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
				break;
			}
		} else {
			pause_flags = np->pause_flags;
		}
	}
	nv_update_pause(dev, pause_flags);

	return retval;
}
static void nv_linkchange(struct net_device *dev)
{
	if (nv_update_linkspeed(dev)) {
		if (!netif_carrier_ok(dev)) {
			netif_carrier_on(dev);
			printk(KERN_INFO "%s: link up.\n", dev->name);
			nv_start_rx(dev);
		}
	} else {
		if (netif_carrier_ok(dev)) {
			netif_carrier_off(dev);
			printk(KERN_INFO "%s: link down.\n", dev->name);
			nv_stop_rx(dev);
		}
	}
}
static void nv_link_irq(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 miistat;

	miistat = readl(base + NvRegMIIStatus);
	writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
	dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);

	if (miistat & (NVREG_MIISTAT_LINKCHANGE))
		nv_linkchange(dev);
	dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name);
}
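/*
 * nv_nic_irq: legacy/MSI top half for DESC_VER_1/2.  Each loop iteration
 * acks the current irq status, reaps tx completions, then handles rx,
 * link changes and error events; after max_interrupt_work iterations
 * the nic irqs are masked and the nic_poll timer takes over.
 */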
static irqreturn_t nv_nic_irq(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;

	dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);

	for (i = 0; ; i++) {
		if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
			events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
			writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
		} else {
			events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
			writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
		}
		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		spin_lock(&np->lock);
		nv_tx_done(dev);
		spin_unlock(&np->lock);

#ifdef CONFIG_FORCEDETH_NAPI
		if (events & NVREG_IRQ_RX_ALL) {
			netif_rx_schedule(dev);

			/* Disable further receive irqs */
			spin_lock(&np->lock);
			np->irqmask &= ~NVREG_IRQ_RX_ALL;

			if (np->msi_flags & NV_MSI_X_ENABLED)
				writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			spin_unlock(&np->lock);
		}
#else
		if (nv_rx_process(dev, dev->weight)) {
			if (unlikely(nv_alloc_rx(dev))) {
				spin_lock(&np->lock);
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
				spin_unlock(&np->lock);
			}
		}
#endif
		if (unlikely(events & NVREG_IRQ_LINK)) {
			spin_lock(&np->lock);
			nv_link_irq(dev);
			spin_unlock(&np->lock);
		}
		if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
			spin_lock(&np->lock);
			nv_linkchange(dev);
			spin_unlock(&np->lock);
			np->link_timeout = jiffies + LINK_TIMEOUT;
		}
		if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
				dev->name, events);
		}
		if (unlikely(events & (NVREG_IRQ_UNKNOWN))) {
			printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
				dev->name, events);
		}
		if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) {
			spin_lock(&np->lock);
			/* disable interrupts on the nic */
			if (!(np->msi_flags & NV_MSI_X_ENABLED))
				writel(0, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq = np->irqmask;
				np->recover_error = 1;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock(&np->lock);
			break;
		}
		if (unlikely(i > max_interrupt_work)) {
			spin_lock(&np->lock);
			/* disable interrupts on the nic */
			if (!(np->msi_flags & NV_MSI_X_ENABLED))
				writel(0, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq = np->irqmask;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
			spin_unlock(&np->lock);
			break;
		}
	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);

	return IRQ_RETVAL(i);
}
#define TX_WORK_PER_LOOP  64
#define RX_WORK_PER_LOOP  64

/*
 * All _optimized functions are used to help increase performance
 * (reduce CPU and increase throughput). They use descriptor version 3,
 * compiler directives, and reduce memory accesses.
 */
static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name);

	for (i = 0; ; i++) {
		if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
			events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
			writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
		} else {
			events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
			writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
		}
		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		spin_lock(&np->lock);
		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
		spin_unlock(&np->lock);

#ifdef CONFIG_FORCEDETH_NAPI
		if (events & NVREG_IRQ_RX_ALL) {
			netif_rx_schedule(dev);

			/* Disable further receive irqs */
			spin_lock(&np->lock);
			np->irqmask &= ~NVREG_IRQ_RX_ALL;

			if (np->msi_flags & NV_MSI_X_ENABLED)
				writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			spin_unlock(&np->lock);
		}
#else
		if (nv_rx_process_optimized(dev, dev->weight)) {
			if (unlikely(nv_alloc_rx_optimized(dev))) {
				spin_lock(&np->lock);
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
				spin_unlock(&np->lock);
			}
		}
#endif
		if (unlikely(events & NVREG_IRQ_LINK)) {
			spin_lock(&np->lock);
			nv_link_irq(dev);
			spin_unlock(&np->lock);
		}
		if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
			spin_lock(&np->lock);
			nv_linkchange(dev);
			spin_unlock(&np->lock);
			np->link_timeout = jiffies + LINK_TIMEOUT;
		}
		if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
				dev->name, events);
		}
		if (unlikely(events & (NVREG_IRQ_UNKNOWN))) {
			printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
				dev->name, events);
		}
		if (unlikely(events & NVREG_IRQ_RECOVER_ERROR)) {
			spin_lock(&np->lock);
			/* disable interrupts on the nic */
			if (!(np->msi_flags & NV_MSI_X_ENABLED))
				writel(0, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq = np->irqmask;
				np->recover_error = 1;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock(&np->lock);
			break;
		}
		if (unlikely(i > max_interrupt_work)) {
			spin_lock(&np->lock);
			/* disable interrupts on the nic */
			if (!(np->msi_flags & NV_MSI_X_ENABLED))
				writel(0, base + NvRegIrqMask);
			else
				writel(np->irqmask, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq = np->irqmask;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
			spin_unlock(&np->lock);
			break;
		}
	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);

	return IRQ_RETVAL(i);
}
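/*
 * With MSI-X in throughput mode the nic's interrupt sources are split
 * across three vectors: nv_nic_irq_tx below handles NVREG_IRQ_TX_ALL,
 * nv_nic_irq_rx handles NVREG_IRQ_RX_ALL, and nv_nic_irq_other handles
 * link changes and recoverable errors (see nv_request_irq).
 */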
static irqreturn_t nv_nic_irq_tx(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;
	unsigned long flags;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);

	for (i = 0; ; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
		writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
		dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		spin_lock_irqsave(&np->lock, flags);
		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
		spin_unlock_irqrestore(&np->lock, flags);

		if (unlikely(events & (NVREG_IRQ_TX_ERR))) {
			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
				dev->name, events);
		}
		if (unlikely(i > max_interrupt_work)) {
			spin_lock_irqsave(&np->lock, flags);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
			spin_unlock_irqrestore(&np->lock, flags);
			break;
		}
	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name);

	return IRQ_RETVAL(i);
}
#ifdef CONFIG_FORCEDETH_NAPI
static int nv_napi_poll(struct net_device *dev, int *budget)
{
	int pkts, limit = min(*budget, dev->quota);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	unsigned long flags;
	int retcode;

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		pkts = nv_rx_process(dev, limit);
		retcode = nv_alloc_rx(dev);
	} else {
		pkts = nv_rx_process_optimized(dev, limit);
		retcode = nv_alloc_rx_optimized(dev);
	}

	if (retcode) {
		spin_lock_irqsave(&np->lock, flags);
		if (!np->in_shutdown)
			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		spin_unlock_irqrestore(&np->lock, flags);
	}

	if (pkts < limit) {
		/* all done, no more packets present */
		netif_rx_complete(dev);

		/* re-enable receive interrupts */
		spin_lock_irqsave(&np->lock, flags);

		np->irqmask |= NVREG_IRQ_RX_ALL;
		if (np->msi_flags & NV_MSI_X_ENABLED)
			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
		else
			writel(np->irqmask, base + NvRegIrqMask);

		spin_unlock_irqrestore(&np->lock, flags);
		return 0;
	} else {
		/* used up our quantum, so reschedule */
		dev->quota -= pkts;
		*budget -= pkts;
		return 1;
	}
}
#endif
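/*
 * Two builds of nv_nic_irq_rx: with CONFIG_FORCEDETH_NAPI the handler
 * only masks rx irqs and schedules nv_napi_poll; without NAPI it
 * processes the rx ring directly in interrupt context.
 */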
#ifdef CONFIG_FORCEDETH_NAPI
static irqreturn_t nv_nic_irq_rx(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	u8 __iomem *base = get_hwbase(dev);
	u32 events;

	events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
	writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);

	if (events) {
		netif_rx_schedule(dev);
		/* disable receive interrupts on the nic */
		writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
		pci_push(base);
	}
	return IRQ_HANDLED;
}
#else
static irqreturn_t nv_nic_irq_rx(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;
	unsigned long flags;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);

	for (i = 0; ; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
		writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
		dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		if (nv_rx_process_optimized(dev, dev->weight)) {
			if (unlikely(nv_alloc_rx_optimized(dev))) {
				spin_lock_irqsave(&np->lock, flags);
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
				spin_unlock_irqrestore(&np->lock, flags);
			}
		}

		if (unlikely(i > max_interrupt_work)) {
			spin_lock_irqsave(&np->lock, flags);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
			spin_unlock_irqrestore(&np->lock, flags);
			break;
		}
	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);

	return IRQ_RETVAL(i);
}
#endif
static irqreturn_t nv_nic_irq_other(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;
	int i;
	unsigned long flags;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);

	for (i = 0; ; i++) {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
		writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
		if (!(events & np->irqmask))
			break;

		/* check tx in case we reached max loop limit in tx isr */
		spin_lock_irqsave(&np->lock, flags);
		nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
		spin_unlock_irqrestore(&np->lock, flags);

		if (events & NVREG_IRQ_LINK) {
			spin_lock_irqsave(&np->lock, flags);
			nv_link_irq(dev);
			spin_unlock_irqrestore(&np->lock, flags);
		}
		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
			spin_lock_irqsave(&np->lock, flags);
			nv_linkchange(dev);
			spin_unlock_irqrestore(&np->lock, flags);
			np->link_timeout = jiffies + LINK_TIMEOUT;
		}
		if (events & NVREG_IRQ_RECOVER_ERROR) {
			spin_lock_irq(&np->lock);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_OTHER;
				np->recover_error = 1;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			spin_unlock_irq(&np->lock);
			break;
		}
		if (events & (NVREG_IRQ_UNKNOWN)) {
			printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
				dev->name, events);
		}
		if (unlikely(i > max_interrupt_work)) {
			spin_lock_irqsave(&np->lock, flags);
			/* disable interrupts on the nic */
			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
			pci_push(base);

			if (!np->in_shutdown) {
				np->nic_poll_irq |= NVREG_IRQ_OTHER;
				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
			}
			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
			spin_unlock_irqrestore(&np->lock, flags);
			break;
		}
	}
	dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name);

	return IRQ_RETVAL(i);
}
static irqreturn_t nv_nic_irq_test(int foo, void *data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 events;

	dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name);

	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
		events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
		writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);
	} else {
		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
		writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
	}
	pci_push(base);
	dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
	if (!(events & NVREG_IRQ_TIMER))
		return IRQ_RETVAL(0);

	spin_lock(&np->lock);
	np->intr_test = 1;
	spin_unlock(&np->lock);

	dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name);

	return IRQ_RETVAL(1);
}
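/*
 * Example: set_msix_vector_map(dev, 2, irqmask with bit 3 set) ors
 * 2 << (3 << 2) = 0x2000 into NvRegMSIXMap0, i.e. vector 2 lands in the
 * 4 bit map field for interrupt source 3.  Sources 8..15 go through
 * NvRegMSIXMap1 with the same field layout.
 */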
static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
{
	u8 __iomem *base = get_hwbase(dev);
	int i;
	u32 msixmap = 0;

	/* Each interrupt bit can be mapped to a MSIX vector (4 bits).
	 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
	 * the remaining 8 interrupts.
	 */
	for (i = 0; i < 8; i++) {
		if ((irqmask >> i) & 0x1) {
			msixmap |= vector << (i << 2);
		}
	}
	writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);

	msixmap = 0;
	for (i = 0; i < 8; i++) {
		if ((irqmask >> (i + 8)) & 0x1) {
			msixmap |= vector << (i << 2);
		}
	}
	writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
}
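/*
 * nv_request_irq tries the interrupt modes in decreasing order of
 * preference: MSI-X (per-source vectors in throughput mode, one shared
 * vector otherwise), then MSI, then the legacy INTx pin.  Each fallback
 * is only attempted if enabling the previous mode failed.
 */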
static int nv_request_irq(struct net_device *dev, int intr_test)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int i;
	irqreturn_t (*handler)(int foo, void *data);

	if (intr_test) {
		handler = nv_nic_irq_test;
	} else {
		if (np->desc_ver == DESC_VER_3)
			handler = nv_nic_irq_optimized;
		else
			handler = nv_nic_irq;
	}

	if (np->msi_flags & NV_MSI_X_CAPABLE) {
		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
			np->msi_x_entry[i].entry = i;
		}
		if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
			np->msi_flags |= NV_MSI_X_ENABLED;
			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
				/* Request irq for rx handling */
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, IRQF_SHARED, dev->name, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_err;
				}
				/* Request irq for tx handling */
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, IRQF_SHARED, dev->name, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_free_rx;
				}
				/* Request irq for link and timer handling */
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, IRQF_SHARED, dev->name, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_free_tx;
				}
				/* map interrupts to their respective vector */
				writel(0, base + NvRegMSIXMap0);
				writel(0, base + NvRegMSIXMap1);
				set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
				set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
			} else {
				/* Request irq for all interrupts */
				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) {
					printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
					pci_disable_msix(np->pci_dev);
					np->msi_flags &= ~NV_MSI_X_ENABLED;
					goto out_err;
				}

				/* map interrupts to vector 0 */
				writel(0, base + NvRegMSIXMap0);
				writel(0, base + NvRegMSIXMap1);
			}
		}
	}
	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
		if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
			np->msi_flags |= NV_MSI_ENABLED;
			if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
				printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
				pci_disable_msi(np->pci_dev);
				np->msi_flags &= ~NV_MSI_ENABLED;
				goto out_err;
			}

			/* map interrupts to vector 0 */
			writel(0, base + NvRegMSIMap0);
			writel(0, base + NvRegMSIMap1);
			/* enable msi vector 0 */
			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
		}
	}
	if (ret != 0) {
		if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
			goto out_err;
	}

	return 0;
out_free_tx:
	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
out_free_rx:
	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
out_err:
	return 1;
}
static void nv_free_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	int i;

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
			free_irq(np->msi_x_entry[i].vector, dev);
		}
		pci_disable_msix(np->pci_dev);
		np->msi_flags &= ~NV_MSI_X_ENABLED;
	} else {
		free_irq(np->pci_dev->irq, dev);
		if (np->msi_flags & NV_MSI_ENABLED) {
			pci_disable_msi(np->pci_dev);
			np->msi_flags &= ~NV_MSI_ENABLED;
		}
	}
}
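/*
 * nv_do_nic_poll runs from the nic_poll timer once an irq handler has
 * masked its sources (too much work, or a recoverable error).  It
 * re-runs the appropriate handler with irqs disabled, restores the irq
 * mask, and for recover_error performs a full ring and engine restart.
 */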
static void nv_do_nic_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 mask = 0;

	/*
	 * First disable irq(s), then reenable interrupts on the nic;
	 * we have to do this before calling nv_nic_irq because that
	 * may decide to do otherwise.
	 */

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq_lockdep(dev->irq);
		mask = np->irqmask;
	} else {
		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
			mask |= NVREG_IRQ_RX_ALL;
		}
		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
			mask |= NVREG_IRQ_TX_ALL;
		}
		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
			disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
			mask |= NVREG_IRQ_OTHER;
		}
	}
	np->nic_poll_irq = 0;

	if (np->recover_error) {
		np->recover_error = 0;
		printk(KERN_INFO "forcedeth: MAC in recoverable error state\n");
		if (netif_running(dev)) {
			netif_tx_lock_bh(dev);
			spin_lock(&np->lock);
			/* stop engines */
			nv_stop_rx(dev);
			nv_stop_tx(dev);
			nv_txrx_reset(dev);
			/* drain rx queue */
			nv_drain_rx(dev);
			nv_drain_tx(dev);
			/* reinit driver view of the rx queue */
			set_bufsize(dev);
			if (nv_init_ring(dev)) {
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
			}
			/* reinit nic view of the rx queue */
			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
			writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
				base + NvRegRingSizes);
			pci_push(base);
			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
			pci_push(base);

			/* restart rx engine */
			nv_start_rx(dev);
			nv_start_tx(dev);
			spin_unlock(&np->lock);
			netif_tx_unlock_bh(dev);
		}
	}

	/* FIXME: Do we need synchronize_irq(dev->irq) here? */

	writel(mask, base + NvRegIrqMask);
	pci_push(base);

	if (!using_multi_irqs(dev)) {
		if (np->desc_ver == DESC_VER_3)
			nv_nic_irq_optimized(0, dev);
		else
			nv_nic_irq(0, dev);
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq_lockdep(dev->irq);
	} else {
		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
			nv_nic_irq_rx(0, dev);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		}
		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
			nv_nic_irq_tx(0, dev);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		}
		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
			nv_nic_irq_other(0, dev);
			enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
		}
	}
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void nv_poll_controller(struct net_device *dev)
{
	nv_do_nic_poll((unsigned long) dev);
}
#endif

static void nv_do_stats_poll(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);

	nv_get_hw_stats(dev);

	if (!np->in_shutdown)
		mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
}
static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct fe_priv *np = netdev_priv(dev);
	strcpy(info->driver, "forcedeth");
	strcpy(info->version, FORCEDETH_VERSION);
	strcpy(info->bus_info, pci_name(np->pci_dev));
}
static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
	struct fe_priv *np = netdev_priv(dev);
	wolinfo->supported = WAKE_MAGIC;

	spin_lock_irq(&np->lock);
	if (np->wolenabled)
		wolinfo->wolopts = WAKE_MAGIC;
	spin_unlock_irq(&np->lock);
}

static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 flags = 0;

	if (wolinfo->wolopts == 0) {
		np->wolenabled = 0;
	} else if (wolinfo->wolopts & WAKE_MAGIC) {
		np->wolenabled = 1;
		flags = NVREG_WAKEUPFLAGS_ENABLE;
	}
	if (netif_running(dev)) {
		spin_lock_irq(&np->lock);
		writel(flags, base + NvRegWakeUpFlags);
		spin_unlock_irq(&np->lock);
	}
	return 0;
}
static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct fe_priv *np = netdev_priv(dev);
	int adv;

	spin_lock_irq(&np->lock);
	ecmd->port = PORT_MII;
	if (!netif_running(dev)) {
		/* We do not track link speed / duplex setting if the
		 * interface is disabled. Force a link check */
		if (nv_update_linkspeed(dev)) {
			if (!netif_carrier_ok(dev))
				netif_carrier_on(dev);
		} else {
			if (netif_carrier_ok(dev))
				netif_carrier_off(dev);
		}
	}

	if (netif_carrier_ok(dev)) {
		switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) {
		case NVREG_LINKSPEED_10:
			ecmd->speed = SPEED_10;
			break;
		case NVREG_LINKSPEED_100:
			ecmd->speed = SPEED_100;
			break;
		case NVREG_LINKSPEED_1000:
			ecmd->speed = SPEED_1000;
			break;
		}
		ecmd->duplex = DUPLEX_HALF;
		if (np->duplex)
			ecmd->duplex = DUPLEX_FULL;
	} else {
		ecmd->speed = -1;
		ecmd->duplex = -1;
	}

	ecmd->autoneg = np->autoneg;

	ecmd->advertising = ADVERTISED_MII;
	if (np->autoneg) {
		ecmd->advertising |= ADVERTISED_Autoneg;
		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		if (adv & ADVERTISE_10HALF)
			ecmd->advertising |= ADVERTISED_10baseT_Half;
		if (adv & ADVERTISE_10FULL)
			ecmd->advertising |= ADVERTISED_10baseT_Full;
		if (adv & ADVERTISE_100HALF)
			ecmd->advertising |= ADVERTISED_100baseT_Half;
		if (adv & ADVERTISE_100FULL)
			ecmd->advertising |= ADVERTISED_100baseT_Full;
		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			if (adv & ADVERTISE_1000FULL)
				ecmd->advertising |= ADVERTISED_1000baseT_Full;
		}
	}
	ecmd->supported = (SUPPORTED_Autoneg |
		SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
		SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
		SUPPORTED_MII);
	if (np->gigabit == PHY_GIGABIT)
		ecmd->supported |= SUPPORTED_1000baseT_Full;

	ecmd->phy_address = np->phyaddr;
	ecmd->transceiver = XCVR_EXTERNAL;

	/* ignore maxtxpkt, maxrxpkt for now */
	spin_unlock_irq(&np->lock);
	return 0;
}
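/*
 * Pause advertisement in nv_set_settings follows 802.3 clause 28:
 * ADVERTISE_PAUSE_CAP requests symmetric pause, and for an rx-only
 * request the driver sets both PAUSE_CAP and PAUSE_ASYM so the
 * resolution in nv_update_linkspeed can still disable tx pause.
 */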
static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct fe_priv *np = netdev_priv(dev);

	if (ecmd->port != PORT_MII)
		return -EINVAL;
	if (ecmd->transceiver != XCVR_EXTERNAL)
		return -EINVAL;
	if (ecmd->phy_address != np->phyaddr) {
		/* TODO: support switching between multiple phys. Should be
		 * trivial, but not enabled due to lack of test hardware. */
		return -EINVAL;
	}
	if (ecmd->autoneg == AUTONEG_ENABLE) {
		u32 mask;

		mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
		if (np->gigabit == PHY_GIGABIT)
			mask |= ADVERTISED_1000baseT_Full;

		if ((ecmd->advertising & mask) == 0)
			return -EINVAL;

	} else if (ecmd->autoneg == AUTONEG_DISABLE) {
		/* Note: autonegotiation disable, speed 1000 intentionally
		 * forbidden - no one should need that. */

		if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
			return -EINVAL;
		if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	netif_carrier_off(dev);
	if (netif_running(dev)) {
		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rx(dev);
		nv_stop_tx(dev);
		spin_unlock(&np->lock);
		netif_tx_unlock_bh(dev);
	}

	if (ecmd->autoneg == AUTONEG_ENABLE) {
		int adv, bmcr;

		np->autoneg = 1;

		/* advertise only what has been requested */
		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (ecmd->advertising & ADVERTISED_10baseT_Half)
			adv |= ADVERTISE_10HALF;
		if (ecmd->advertising & ADVERTISED_10baseT_Full)
			adv |= ADVERTISE_10FULL;
		if (ecmd->advertising & ADVERTISED_100baseT_Half)
			adv |= ADVERTISE_100HALF;
		if (ecmd->advertising & ADVERTISED_100baseT_Full)
			adv |= ADVERTISE_100FULL;
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
			adv |= ADVERTISE_PAUSE_ASYM;
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);

		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			adv &= ~ADVERTISE_1000FULL;
			if (ecmd->advertising & ADVERTISED_1000baseT_Full)
				adv |= ADVERTISE_1000FULL;
			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
		}

		if (netif_running(dev))
			printk(KERN_INFO "%s: link down.\n", dev->name);
		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
			bmcr |= BMCR_ANENABLE;
			/* reset the phy in order for settings to stick,
			 * and cause autoneg to start */
			if (phy_reset(dev, bmcr)) {
				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
				return -EINVAL;
			}
		} else {
			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
		}
	} else {
		int adv, bmcr;

		np->autoneg = 0;

		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
			adv |= ADVERTISE_10HALF;
		if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
			adv |= ADVERTISE_10FULL;
		if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
			adv |= ADVERTISE_100HALF;
		if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
			adv |= ADVERTISE_100FULL;
		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) { /* for rx we set both advertisements but disable tx pause */
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		}
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
			adv |= ADVERTISE_PAUSE_ASYM;
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
		}
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
		np->fixed_mode = adv;

		if (np->gigabit == PHY_GIGABIT) {
			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
			adv &= ~ADVERTISE_1000FULL;
			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
		}

		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
		if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
			bmcr |= BMCR_FULLDPLX;
		if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
			bmcr |= BMCR_SPEED100;
		if (np->phy_oui == PHY_OUI_MARVELL) {
			/* reset the phy in order for forced mode settings to stick */
			if (phy_reset(dev, bmcr)) {
				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
				return -EINVAL;
			}
		} else {
			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
			if (netif_running(dev)) {
				/* Wait a bit and then reconfigure the nic. */
				udelay(10);
				nv_linkchange(dev);
			}
		}
	}

	if (netif_running(dev)) {
		nv_start_rx(dev);
		nv_start_tx(dev);
		nv_enable_irq(dev);
	}

	return 0;
}
#define FORCEDETH_REGS_VER	1

static int nv_get_regs_len(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	return np->register_size;
}

static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 *rbuf = buf;
	int i;

	regs->version = FORCEDETH_REGS_VER;
	spin_lock_irq(&np->lock);
	/* the caller's buffer holds exactly register_size bytes, so stop
	 * one u32 short of it (an <= bound would write past the end) */
	for (i = 0; i < np->register_size/sizeof(u32); i++)
		rbuf[i] = readl(base + i*sizeof(u32));
	spin_unlock_irq(&np->lock);
}
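
/* Usage note (illustrative, not part of the original driver): this dump is
 * what userspace retrieves with "ethtool -d <iface>"; the ethtool core
 * allocates nv_get_regs_len(dev) bytes and passes them in as 'buf'. The
 * interface name used with the command is of course a placeholder. */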
static int nv_nway_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int ret;

	if (np->autoneg) {
		int bmcr;

		netif_carrier_off(dev);
		if (netif_running(dev)) {
			nv_disable_irq(dev);
			netif_tx_lock_bh(dev);
			spin_lock(&np->lock);
			/* stop engines */
			nv_stop_rx(dev);
			nv_stop_tx(dev);
			spin_unlock(&np->lock);
			netif_tx_unlock_bh(dev);
			printk(KERN_INFO "%s: link down.\n", dev->name);
		}

		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
			bmcr |= BMCR_ANENABLE;
			/* reset the phy in order for settings to stick*/
			if (phy_reset(dev, bmcr)) {
				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
				return -EINVAL;
			}
		} else {
			bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
		}

		if (netif_running(dev)) {
			nv_start_rx(dev);
			nv_start_tx(dev);
			nv_enable_irq(dev);
		}
		ret = 0;
	} else {
		ret = -EINVAL;
	}

	return ret;
}
static int nv_set_tso(struct net_device *dev, u32 value)
{
	struct fe_priv *np = netdev_priv(dev);

	if ((np->driver_data & DEV_HAS_CHECKSUM))
		return ethtool_op_set_tso(dev, value);
	else
		return -EOPNOTSUPP;
}
static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
{
	struct fe_priv *np = netdev_priv(dev);

	ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;

	ring->rx_pending = np->rx_ring_size;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
	ring->tx_pending = np->tx_ring_size;
}
static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
	dma_addr_t ring_addr;

	if (ring->rx_pending < RX_RING_MIN ||
	    ring->tx_pending < TX_RING_MIN ||
	    ring->rx_mini_pending != 0 ||
	    ring->rx_jumbo_pending != 0 ||
	    (np->desc_ver == DESC_VER_1 &&
	     (ring->rx_pending > RING_MAX_DESC_VER_1 ||
	      ring->tx_pending > RING_MAX_DESC_VER_1)) ||
	    (np->desc_ver != DESC_VER_1 &&
	     (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
	      ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
		return -EINVAL;
	}

	/* allocate new rings */
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		rxtx_ring = pci_alloc_consistent(np->pci_dev,
					    sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
					    &ring_addr);
	} else {
		rxtx_ring = pci_alloc_consistent(np->pci_dev,
					    sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
					    &ring_addr);
	}
	rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL);
	tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
	if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
		/* fall back to old rings */
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
			if (rxtx_ring)
				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
						    rxtx_ring, ring_addr);
		} else {
			if (rxtx_ring)
				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
						    rxtx_ring, ring_addr);
		}
		if (rx_skbuff)
			kfree(rx_skbuff);
		if (tx_skbuff)
			kfree(tx_skbuff);
		goto exit;
	}

	if (netif_running(dev)) {
		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rx(dev);
		nv_stop_tx(dev);
		nv_txrx_reset(dev);
		/* drain queues */
		nv_drain_rx(dev);
		nv_drain_tx(dev);
		/* delete queues */
		free_rings(dev);
	}

	/* set new values */
	np->rx_ring_size = ring->rx_pending;
	np->tx_ring_size = ring->tx_pending;
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		np->rx_ring.orig = (struct ring_desc*)rxtx_ring;
		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
	} else {
		np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring;
		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
	}
	np->rx_skb = (struct nv_skb_map*)rx_skbuff;
	np->tx_skb = (struct nv_skb_map*)tx_skbuff;
	np->ring_addr = ring_addr;

	memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
	memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);

	if (netif_running(dev)) {
		/* reinit driver view of the queues */
		set_bufsize(dev);
		if (nv_init_ring(dev)) {
			if (!np->in_shutdown)
				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
		}

		/* reinit nic view of the queues */
		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
		writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
			base + NvRegRingSizes);
		pci_push(base);
		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
		pci_push(base);

		/* restart engines */
		nv_start_rx(dev);
		nv_start_tx(dev);
		spin_unlock(&np->lock);
		netif_tx_unlock_bh(dev);
		nv_enable_irq(dev);
	}
	return 0;
exit:
	return -ENOMEM;
}
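
/* Usage sketch (assumes the standard ethtool utility; the interface name is
 * a placeholder): "ethtool -g eth0" reports the bounds filled in by
 * nv_get_ringparam() above, and "ethtool -G eth0 rx 512 tx 512" lands in
 * nv_set_ringparam(), which validates the request against RX_RING_MIN,
 * TX_RING_MIN and the per-descriptor-version maxima before reallocating
 * the rings. */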
static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
{
	struct fe_priv *np = netdev_priv(dev);

	pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
	pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
	pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
}
static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
{
	struct fe_priv *np = netdev_priv(dev);
	int adv, bmcr;

	if ((!np->autoneg && np->duplex == 0) ||
	    (np->autoneg && !pause->autoneg && np->duplex == 0)) {
		printk(KERN_INFO "%s: can not set pause settings when forced link is in half duplex.\n",
		       dev->name);
		return -EINVAL;
	}
	if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
		printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name);
		return -EINVAL;
	}

	netif_carrier_off(dev);
	if (netif_running(dev)) {
		nv_disable_irq(dev);
		netif_tx_lock_bh(dev);
		spin_lock(&np->lock);
		/* stop engines */
		nv_stop_rx(dev);
		nv_stop_tx(dev);
		spin_unlock(&np->lock);
		netif_tx_unlock_bh(dev);
	}

	np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
	if (pause->rx_pause)
		np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
	if (pause->tx_pause)
		np->pause_flags |= NV_PAUSEFRAME_TX_REQ;

	if (np->autoneg && pause->autoneg) {
		np->pause_flags |= NV_PAUSEFRAME_AUTONEG;

		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
		adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisments but disable tx pause */
			adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
			adv |=  ADVERTISE_PAUSE_ASYM;
		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);

		if (netif_running(dev))
			printk(KERN_INFO "%s: link down.\n", dev->name);
		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
	} else {
		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
		if (pause->rx_pause)
			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
		if (pause->tx_pause)
			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;

		if (!netif_running(dev))
			nv_update_linkspeed(dev);
		else
			nv_update_pause(dev, np->pause_flags);
	}

	if (netif_running(dev)) {
		nv_start_rx(dev);
		nv_start_tx(dev);
		nv_enable_irq(dev);
	}
	return 0;
}
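
/* Usage sketch (illustrative; interface name is a placeholder): a request
 * such as "ethtool -A eth0 autoneg on rx on tx off" arrives here; with
 * autoneg the request is folded into the PAUSE_CAP/PAUSE_ASYM advertisement
 * bits and autonegotiation is restarted, otherwise the pause state is
 * applied to the hardware directly. */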
static u32 nv_get_rx_csum(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	return (np->rx_csum) != 0;
}
static int nv_set_rx_csum(struct net_device *dev, u32 data)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int retcode = 0;

	if (np->driver_data & DEV_HAS_CHECKSUM) {
		if (data) {
			np->rx_csum = 1;
			np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
		} else {
			np->rx_csum = 0;
			/* vlan is dependent on rx checksum offload */
			if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE))
				np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
		}
		if (netif_running(dev)) {
			spin_lock_irq(&np->lock);
			writel(np->txrxctl_bits, base + NvRegTxRxControl);
			spin_unlock_irq(&np->lock);
		}
	} else {
		return -EINVAL;
	}

	return retcode;
}
static int nv_set_tx_csum(struct net_device *dev, u32 data)
{
	struct fe_priv *np = netdev_priv(dev);

	if (np->driver_data & DEV_HAS_CHECKSUM)
		return ethtool_op_set_tx_hw_csum(dev, data);
	else
		return -EOPNOTSUPP;
}

static int nv_set_sg(struct net_device *dev, u32 data)
{
	struct fe_priv *np = netdev_priv(dev);

	if (np->driver_data & DEV_HAS_CHECKSUM)
		return ethtool_op_set_sg(dev, data);
	else
		return -EOPNOTSUPP;
}
static int nv_get_stats_count(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	if (np->driver_data & DEV_HAS_STATISTICS_V1)
		return NV_DEV_STATISTICS_V1_COUNT;
	else if (np->driver_data & DEV_HAS_STATISTICS_V2)
		return NV_DEV_STATISTICS_V2_COUNT;
	else
		return 0;
}
static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
{
	struct fe_priv *np = netdev_priv(dev);

	/* update stats */
	nv_do_stats_poll((unsigned long)dev);

	memcpy(buffer, &np->estats, nv_get_stats_count(dev)*sizeof(u64));
}
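
/* Usage note (illustrative): "ethtool -S eth0" pairs the counters copied
 * here from np->estats with the names emitted by nv_get_strings() further
 * down; "eth0" is a placeholder interface name. */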
static int nv_self_test_count(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	if (np->driver_data & DEV_HAS_TEST_EXTENDED)
		return NV_TEST_COUNT_EXTENDED;
	else
		return NV_TEST_COUNT_BASE;
}
static int nv_link_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int mii_status;

	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);

	/* check phy link status */
	if (!(mii_status & BMSR_LSTATUS))
		return 0;
	else
		return 1;
}
static int nv_register_test(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	int i = 0;
	u32 orig_read, new_read;

	do {
		orig_read = readl(base + nv_registers_test[i].reg);

		/* xor with mask to toggle bits */
		orig_read ^= nv_registers_test[i].mask;

		writel(orig_read, base + nv_registers_test[i].reg);

		new_read = readl(base + nv_registers_test[i].reg);

		if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
			return 0;

		/* restore original value */
		orig_read ^= nv_registers_test[i].mask;
		writel(orig_read, base + nv_registers_test[i].reg);

	} while (nv_registers_test[++i].reg != 0);

	return 1;
}
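
/* Worked example of the toggle test above (values purely illustrative):
 * with mask 0x0000000f and an initial readl() of 0x12345670, the xor
 * writes back 0x1234567f; any readback that differs in the masked bits
 * (new_read & 0xf != 0xf) fails the test. The second xor restores the
 * original 0x12345670 before the loop advances to the next register. */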
static int nv_interrupt_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int testcnt;
	u32 save_msi_flags, save_poll_interval = 0;

	if (netif_running(dev)) {
		/* free current irq */
		nv_free_irq(dev);
		save_poll_interval = readl(base+NvRegPollingInterval);
	}

	/* flag to test interrupt handler */
	np->intr_test = 0;

	/* setup test irq */
	save_msi_flags = np->msi_flags;
	np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
	np->msi_flags |= 0x001; /* setup 1 vector */
	if (nv_request_irq(dev, 1))
		return 0;

	/* setup timer interrupt */
	writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);

	nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);

	/* wait for at least one interrupt */
	msleep(100);

	spin_lock_irq(&np->lock);

	/* flag should be set within ISR */
	testcnt = np->intr_test;
	if (!testcnt)
		ret = 2;

	nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
	if (!(np->msi_flags & NV_MSI_X_ENABLED))
		writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	else
		writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);

	spin_unlock_irq(&np->lock);

	nv_free_irq(dev);

	np->msi_flags = save_msi_flags;

	if (netif_running(dev)) {
		writel(save_poll_interval, base + NvRegPollingInterval);
		writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
		/* restore original irq */
		if (nv_request_irq(dev, 0))
			return 0;
	}

	return ret;
}
static int nv_loopback_test(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	struct sk_buff *tx_skb, *rx_skb;
	dma_addr_t test_dma_addr;
	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
	u32 flags;
	int len, i, pkt_len;
	u8 *pkt_data;
	u32 filter_flags = 0;
	u32 misc1_flags = 0;
	int ret = 1;

	if (netif_running(dev)) {
		nv_disable_irq(dev);
		filter_flags = readl(base + NvRegPacketFilterFlags);
		misc1_flags = readl(base + NvRegMisc1);
	} else {
		nv_txrx_reset(dev);
	}

	/* reinit driver view of the rx queue */
	set_bufsize(dev);
	nv_init_ring(dev);

	/* setup hardware for loopback */
	writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
	writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);

	/* reinit nic view of the rx queue */
	writel(np->rx_buf_sz, base + NvRegOffloadConfig);
	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
	writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
		base + NvRegRingSizes);
	pci_push(base);

	/* restart rx engine */
	nv_start_rx(dev);
	nv_start_tx(dev);

	/* setup packet for tx */
	pkt_len = ETH_DATA_LEN;
	tx_skb = dev_alloc_skb(pkt_len);
	if (!tx_skb) {
		printk(KERN_ERR "dev_alloc_skb() failed during loopback test"
			 " of %s\n", dev->name);
		ret = 0;
		goto out;
	}
	test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
				       skb_tailroom(tx_skb),
				       PCI_DMA_FROMDEVICE);
	pkt_data = skb_put(tx_skb, pkt_len);
	for (i = 0; i < pkt_len; i++)
		pkt_data[i] = (u8)(i & 0xff);

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
		np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
	} else {
		np->tx_ring.ex[0].bufhigh = cpu_to_le64(test_dma_addr) >> 32;
		np->tx_ring.ex[0].buflow = cpu_to_le64(test_dma_addr) & 0x0FFFFFFFF;
		np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
	}
	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
	pci_push(get_hwbase(dev));

	msleep(500);

	/* check for rx of the packet */
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
		len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);

	} else {
		flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
		len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
	}

	if (flags & NV_RX_AVAIL) {
		ret = 0;
	} else if (np->desc_ver == DESC_VER_1) {
		if (flags & NV_RX_ERROR)
			ret = 0;
	} else {
		if (flags & NV_RX2_ERROR) {
			ret = 0;
		}
	}

	if (ret) {
		if (len != pkt_len) {
			ret = 0;
			dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n",
				dev->name, len, pkt_len);
		} else {
			rx_skb = np->rx_skb[0].skb;
			for (i = 0; i < pkt_len; i++) {
				if (rx_skb->data[i] != (u8)(i & 0xff)) {
					ret = 0;
					dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n",
						dev->name, i);
					break;
				}
			}
		}
	} else {
		dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
	}

	pci_unmap_page(np->pci_dev, test_dma_addr,
		       (skb_end_pointer(tx_skb) - tx_skb->data),
		       PCI_DMA_TODEVICE);
	dev_kfree_skb_any(tx_skb);
 out:
	/* stop engines */
	nv_stop_rx(dev);
	nv_stop_tx(dev);
	nv_txrx_reset(dev);
	/* drain rx queue */
	nv_drain_rx(dev);
	nv_drain_tx(dev);

	if (netif_running(dev)) {
		writel(misc1_flags, base + NvRegMisc1);
		writel(filter_flags, base + NvRegPacketFilterFlags);
		nv_enable_irq(dev);
	}

	return ret;
}
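
/* Usage note (illustrative): the full test vector, including this loopback
 * test, runs offline via "ethtool -t eth0 offline" (interface name is a
 * placeholder); see nv_self_test() below for the dispatch order. */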
static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int result;
	memset(buffer, 0, nv_self_test_count(dev)*sizeof(u64));

	if (!nv_link_test(dev)) {
		test->flags |= ETH_TEST_FL_FAILED;
		buffer[0] = 1;
	}

	if (test->flags & ETH_TEST_FL_OFFLINE) {
		if (netif_running(dev)) {
			netif_stop_queue(dev);
			netif_poll_disable(dev);
			netif_tx_lock_bh(dev);
			spin_lock_irq(&np->lock);
			nv_disable_hw_interrupts(dev, np->irqmask);
			if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
				writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
			} else {
				writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
			}
			/* stop engines */
			nv_stop_rx(dev);
			nv_stop_tx(dev);
			nv_txrx_reset(dev);
			/* drain rx queue */
			nv_drain_rx(dev);
			nv_drain_tx(dev);
			spin_unlock_irq(&np->lock);
			netif_tx_unlock_bh(dev);
		}

		if (!nv_register_test(dev)) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[1] = 1;
		}

		result = nv_interrupt_test(dev);
		if (result != 1) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[2] = 1;
		}
		if (result == 0) {
			/* bail out */
			return;
		}

		if (!nv_loopback_test(dev)) {
			test->flags |= ETH_TEST_FL_FAILED;
			buffer[3] = 1;
		}

		if (netif_running(dev)) {
			/* reinit driver view of the rx queue */
			set_bufsize(dev);
			if (nv_init_ring(dev)) {
				if (!np->in_shutdown)
					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
			}
			/* reinit nic view of the rx queue */
			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
			writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
				base + NvRegRingSizes);
			pci_push(base);
			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
			pci_push(base);
			/* restart rx engine */
			nv_start_rx(dev);
			nv_start_tx(dev);
			netif_start_queue(dev);
			netif_poll_enable(dev);
			nv_enable_hw_interrupts(dev, np->irqmask);
		}
	}
}
static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buffer, &nv_estats_str, nv_get_stats_count(dev)*sizeof(struct nv_ethtool_str));
		break;
	case ETH_SS_TEST:
		memcpy(buffer, &nv_etests_str, nv_self_test_count(dev)*sizeof(struct nv_ethtool_str));
		break;
	}
}
static const struct ethtool_ops ops = {
	.get_drvinfo = nv_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_wol = nv_get_wol,
	.set_wol = nv_set_wol,
	.get_settings = nv_get_settings,
	.set_settings = nv_set_settings,
	.get_regs_len = nv_get_regs_len,
	.get_regs = nv_get_regs,
	.nway_reset = nv_nway_reset,
	.get_perm_addr = ethtool_op_get_perm_addr,
	.get_tso = ethtool_op_get_tso,
	.set_tso = nv_set_tso,
	.get_ringparam = nv_get_ringparam,
	.set_ringparam = nv_set_ringparam,
	.get_pauseparam = nv_get_pauseparam,
	.set_pauseparam = nv_set_pauseparam,
	.get_rx_csum = nv_get_rx_csum,
	.set_rx_csum = nv_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = nv_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = nv_set_sg,
	.get_strings = nv_get_strings,
	.get_stats_count = nv_get_stats_count,
	.get_ethtool_stats = nv_get_ethtool_stats,
	.self_test_count = nv_self_test_count,
	.self_test = nv_self_test,
};
static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct fe_priv *np = get_nvpriv(dev);

	spin_lock_irq(&np->lock);

	/* save vlan group */
	np->vlangrp = grp;

	if (grp) {
		/* enable vlan on MAC */
		np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
	} else {
		/* disable vlan on MAC */
		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
		np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
	}

	writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);

	spin_unlock_irq(&np->lock);
}
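
/* Usage note (illustrative): this hook runs when a vlan group is
 * registered on the device, e.g. after "vconfig add eth0 100" on kernels
 * of this era (interface name and vlan id are placeholders); a non-NULL
 * grp turns on hardware tag stripping and insertion. */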
/* The mgmt unit and driver use a semaphore to access the phy during init */
static int nv_mgmt_acquire_sema(struct net_device *dev)
{
	u8 __iomem *base = get_hwbase(dev);
	int i;
	u32 tx_ctrl, mgmt_sema;

	for (i = 0; i < 10; i++) {
		mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK;
		if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE)
			break;
		msleep(500);
	}

	if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE)
		return 0;

	for (i = 0; i < 2; i++) {
		tx_ctrl = readl(base + NvRegTransmitterControl);
		tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
		writel(tx_ctrl, base + NvRegTransmitterControl);

		/* verify that semaphore was acquired */
		tx_ctrl = readl(base + NvRegTransmitterControl);
		if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) &&
		    ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE))
			return 1;
		else
			udelay(50);
	}

	return 0;
}
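
/* Summary of the protocol above (illustrative): the first loop polls up to
 * roughly 5 seconds (10 x 500 ms) for the management unit to release its
 * semaphore, then the second loop tries twice to latch the host semaphore
 * bit and re-reads the register to confirm both that the host bit stuck
 * and that the mgmt bit is still free before claiming success. */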
static int nv_open(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	int ret = 1;
	int oom, i;

	dprintk(KERN_DEBUG "nv_open: begin\n");

	/* erase previous misconfiguration */
	if (np->driver_data & DEV_HAS_POWER_CNTRL)
		nv_mac_reset(dev);
	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
	writel(0, base + NvRegMulticastAddrB);
	writel(0, base + NvRegMulticastMaskA);
	writel(0, base + NvRegMulticastMaskB);
	writel(0, base + NvRegPacketFilterFlags);

	writel(0, base + NvRegTransmitterControl);
	writel(0, base + NvRegReceiverControl);

	writel(0, base + NvRegAdapterControl);

	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
		writel(NVREG_TX_PAUSEFRAME_DISABLE,  base + NvRegTxPauseFrame);

	/* initialize descriptor rings */
	set_bufsize(dev);
	oom = nv_init_ring(dev);

	writel(0, base + NvRegLinkSpeed);
	writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
	nv_txrx_reset(dev);
	writel(0, base + NvRegUnknownSetupReg6);

	np->in_shutdown = 0;

	/* give hw rings */
	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
	writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
		base + NvRegRingSizes);

	writel(np->linkspeed, base + NvRegLinkSpeed);
	if (np->desc_ver == DESC_VER_1)
		writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
	else
		writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark);
	writel(np->txrxctl_bits, base + NvRegTxRxControl);
	writel(np->vlanctl_bits, base + NvRegVlanControl);
	pci_push(base);
	writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
	reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
			NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
			KERN_INFO "open: SetupReg5, Bit 31 remained off\n");

	writel(0, base + NvRegMIIMask);
	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);

	writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
	writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
	writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
	writel(np->rx_buf_sz, base + NvRegOffloadConfig);

	writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
	get_random_bytes(&i, sizeof(i));
	writel(NVREG_RNDSEED_FORCE | (i&NVREG_RNDSEED_MASK), base + NvRegRandomSeed);
	writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral);
	writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral);
	if (poll_interval == -1) {
		if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
			writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
		else
			writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
	}
	else
		writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
	writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
			base + NvRegAdapterControl);
	writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
	writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask);
	if (np->wolenabled)
		writel(NVREG_WAKEUPFLAGS_ENABLE , base + NvRegWakeUpFlags);

	i = readl(base + NvRegPowerState);
	if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0)
		writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);

	pci_push(base);
	udelay(10);
	writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);

	nv_disable_hw_interrupts(dev, np->irqmask);
	pci_push(base);
	writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
	pci_push(base);

	if (nv_request_irq(dev, 0)) {
		goto out_drain;
	}

	/* ask for interrupts */
	nv_enable_hw_interrupts(dev, np->irqmask);

	spin_lock_irq(&np->lock);
	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
	writel(0, base + NvRegMulticastAddrB);
	writel(0, base + NvRegMulticastMaskA);
	writel(0, base + NvRegMulticastMaskB);
	writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
	/* One manual link speed update: Interrupts are enabled, future link
	 * speed changes cause interrupts and are handled by nv_link_irq().
	 */
	{
		u32 miistat;
		miistat = readl(base + NvRegMIIStatus);
		writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
		dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
	}
	/* set linkspeed to invalid value, thus force nv_update_linkspeed
	 * to init hw */
	np->linkspeed = 0;
	ret = nv_update_linkspeed(dev);
	nv_start_rx(dev);
	nv_start_tx(dev);
	netif_start_queue(dev);
	netif_poll_enable(dev);

	if (ret) {
		netif_carrier_on(dev);
	} else {
		printk("%s: no link during initialization.\n", dev->name);
		netif_carrier_off(dev);
	}
	if (oom)
		mod_timer(&np->oom_kick, jiffies + OOM_REFILL);

	/* start statistics timer */
	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2))
		mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);

	spin_unlock_irq(&np->lock);

	return 0;
out_drain:
	drain_ring(dev);
	return ret;
}
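
/* Note (illustrative): nv_open() is wired up as dev->open in nv_probe(),
 * so it runs when the interface is brought up, e.g. via "ip link set eth0
 * up" or "ifconfig eth0 up" (interface name is a placeholder). */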
static int nv_close(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base;

	spin_lock_irq(&np->lock);
	np->in_shutdown = 1;
	spin_unlock_irq(&np->lock);
	netif_poll_disable(dev);
	synchronize_irq(dev->irq);

	del_timer_sync(&np->oom_kick);
	del_timer_sync(&np->nic_poll);
	del_timer_sync(&np->stats_poll);

	netif_stop_queue(dev);
	spin_lock_irq(&np->lock);
	nv_stop_tx(dev);
	nv_stop_rx(dev);
	nv_txrx_reset(dev);

	/* disable interrupts on the nic or we will lock up */
	base = get_hwbase(dev);
	nv_disable_hw_interrupts(dev, np->irqmask);
	pci_push(base);
	dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);

	spin_unlock_irq(&np->lock);

	nv_free_irq(dev);

	drain_ring(dev);

	if (np->wolenabled) {
		writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
		nv_start_rx(dev);
	}

	/* FIXME: power down nic */

	return 0;
}
static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
	struct net_device *dev;
	struct fe_priv *np;
	unsigned long addr;
	u8 __iomem *base;
	int err, i;
	u32 powerstate, txreg;
	u32 phystate_orig = 0, phystate;
	int phyinitialized = 0;

	dev = alloc_etherdev(sizeof(struct fe_priv));
	err = -ENOMEM;
	if (!dev)
		goto out;

	np = netdev_priv(dev);
	np->pci_dev = pci_dev;
	spin_lock_init(&np->lock);
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pci_dev->dev);

	init_timer(&np->oom_kick);
	np->oom_kick.data = (unsigned long) dev;
	np->oom_kick.function = &nv_do_rx_refill;	/* timer handler */
	init_timer(&np->nic_poll);
	np->nic_poll.data = (unsigned long) dev;
	np->nic_poll.function = &nv_do_nic_poll;	/* timer handler */
	init_timer(&np->stats_poll);
	np->stats_poll.data = (unsigned long) dev;
	np->stats_poll.function = &nv_do_stats_poll;	/* timer handler */

	err = pci_enable_device(pci_dev);
	if (err) {
		printk(KERN_INFO "forcedeth: pci_enable_dev failed (%d) for device %s\n",
				err, pci_name(pci_dev));
		goto out_free;
	}

	pci_set_master(pci_dev);

	err = pci_request_regions(pci_dev, DRV_NAME);
	if (err < 0)
		goto out_disable;

	if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2))
		np->register_size = NV_PCI_REGSZ_VER3;
	else if (id->driver_data & DEV_HAS_STATISTICS_V1)
		np->register_size = NV_PCI_REGSZ_VER2;
	else
		np->register_size = NV_PCI_REGSZ_VER1;

	err = -EINVAL;
	addr = 0;
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
				pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i),
				pci_resource_len(pci_dev, i),
				pci_resource_flags(pci_dev, i));
		if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
				pci_resource_len(pci_dev, i) >= np->register_size) {
			addr = pci_resource_start(pci_dev, i);
			break;
		}
	}
	if (i == DEVICE_COUNT_RESOURCE) {
		printk(KERN_INFO "forcedeth: Couldn't find register window for device %s.\n",
					pci_name(pci_dev));
		goto out_relreg;
	}

	/* copy of driver data */
	np->driver_data = id->driver_data;

	/* handle different descriptor versions */
	if (id->driver_data & DEV_HAS_HIGH_DMA) {
		/* packet format 3: supports 40-bit addressing */
		np->desc_ver = DESC_VER_3;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
		if (dma_64bit) {
			if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) {
				printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
						pci_name(pci_dev));
			} else {
				dev->features |= NETIF_F_HIGHDMA;
				printk(KERN_INFO "forcedeth: using HIGHDMA\n");
			}
			if (pci_set_consistent_dma_mask(pci_dev, DMA_39BIT_MASK)) {
				printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed, using 32-bit ring buffers for device %s.\n",
						pci_name(pci_dev));
			}
		}
	} else if (id->driver_data & DEV_HAS_LARGEDESC) {
		/* packet format 2: supports jumbo frames */
		np->desc_ver = DESC_VER_2;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
	} else {
		/* original packet format */
		np->desc_ver = DESC_VER_1;
		np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
	}

	np->pkt_limit = NV_PKTLIMIT_1;
	if (id->driver_data & DEV_HAS_LARGEDESC)
		np->pkt_limit = NV_PKTLIMIT_2;

	if (id->driver_data & DEV_HAS_CHECKSUM) {
		np->rx_csum = 1;
		np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
		dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
		dev->features |= NETIF_F_TSO;
	}

	np->vlanctl_bits = 0;
	if (id->driver_data & DEV_HAS_VLAN) {
		np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
		dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
		dev->vlan_rx_register = nv_vlan_rx_register;
	}

	np->msi_flags = 0;
	if ((id->driver_data & DEV_HAS_MSI) && msi) {
		np->msi_flags |= NV_MSI_CAPABLE;
	}
	if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
		np->msi_flags |= NV_MSI_X_CAPABLE;
	}

	np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
	if (id->driver_data & DEV_HAS_PAUSEFRAME_TX) {
		np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
	}

	err = -ENOMEM;
	np->base = ioremap(addr, np->register_size);
	if (!np->base)
		goto out_relreg;
	dev->base_addr = (unsigned long)np->base;

	dev->irq = pci_dev->irq;

	np->rx_ring_size = RX_RING_DEFAULT;
	np->tx_ring_size = TX_RING_DEFAULT;

	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
		np->rx_ring.orig = pci_alloc_consistent(pci_dev,
					sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					&np->ring_addr);
		if (!np->rx_ring.orig)
			goto out_unmap;
		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
	} else {
		np->rx_ring.ex = pci_alloc_consistent(pci_dev,
					sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					&np->ring_addr);
		if (!np->rx_ring.ex)
			goto out_unmap;
		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
	}
	np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
	np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
	if (!np->rx_skb || !np->tx_skb)
		goto out_freering;

	dev->open = nv_open;
	dev->stop = nv_close;
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		dev->hard_start_xmit = nv_start_xmit;
	else
		dev->hard_start_xmit = nv_start_xmit_optimized;
	dev->get_stats = nv_get_stats;
	dev->change_mtu = nv_change_mtu;
	dev->set_mac_address = nv_set_mac_address;
	dev->set_multicast_list = nv_set_multicast;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = nv_poll_controller;
#endif
	dev->weight = RX_WORK_PER_LOOP;
#ifdef CONFIG_FORCEDETH_NAPI
	dev->poll = nv_napi_poll;
#endif
	SET_ETHTOOL_OPS(dev, &ops);
	dev->tx_timeout = nv_tx_timeout;
	dev->watchdog_timeo = NV_WATCHDOG_TIMEO;

	pci_set_drvdata(pci_dev, dev);

	/* read the mac address */
	base = get_hwbase(dev);
	np->orig_mac[0] = readl(base + NvRegMacAddrA);
	np->orig_mac[1] = readl(base + NvRegMacAddrB);

	/* check the workaround bit for correct mac address order */
	txreg = readl(base + NvRegTransmitPoll);
	if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
		/* mac address is already in correct order */
		dev->dev_addr[0] = (np->orig_mac[0] >>  0) & 0xff;
		dev->dev_addr[1] = (np->orig_mac[0] >>  8) & 0xff;
		dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
		dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
		dev->dev_addr[4] = (np->orig_mac[1] >>  0) & 0xff;
		dev->dev_addr[5] = (np->orig_mac[1] >>  8) & 0xff;
	} else {
		/* need to reverse mac address to correct order */
		dev->dev_addr[0] = (np->orig_mac[1] >>  8) & 0xff;
		dev->dev_addr[1] = (np->orig_mac[1] >>  0) & 0xff;
		dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
		dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
		dev->dev_addr[4] = (np->orig_mac[0] >>  8) & 0xff;
		dev->dev_addr[5] = (np->orig_mac[0] >>  0) & 0xff;
		/* set permanent address to be correct aswell */
		np->orig_mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
			(dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
		np->orig_mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
		writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	if (!is_valid_ether_addr(dev->perm_addr)) {
		/*
		 * Bad mac address. At least one bios sets the mac address
		 * to 01:23:45:67:89:ab
		 */
		printk(KERN_ERR "%s: Invalid Mac address detected: %02x:%02x:%02x:%02x:%02x:%02x\n",
			pci_name(pci_dev),
			dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
			dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
		printk(KERN_ERR "Please complain to your hardware vendor. Switching to a random MAC.\n");
		dev->dev_addr[0] = 0x00;
		dev->dev_addr[1] = 0x00;
		dev->dev_addr[2] = 0x6c;
		get_random_bytes(&dev->dev_addr[3], 3);
	}

	dprintk(KERN_DEBUG "%s: MAC Address %02x:%02x:%02x:%02x:%02x:%02x\n", pci_name(pci_dev),
			dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
			dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);

	/* set mac address */
	nv_copy_mac_to_hw(dev);
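
	/* Worked example for the address-order fixup above (values purely
	 * illustrative): if the workaround bit is clear and the registers
	 * read back orig_mac[0] = 0x44332211 and orig_mac[1] = 0x00006655,
	 * the reversed extraction yields dev_addr = 66:55:44:33:22:11, and
	 * orig_mac[] is then rewritten so the same bytes reload in the
	 * "already correct" order on the next probe. */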
	/* disable WOL */
	writel(0, base + NvRegWakeUpFlags);
	np->wolenabled = 0;

	if (id->driver_data & DEV_HAS_POWER_CNTRL) {

		/* take phy and nic out of low power mode */
		powerstate = readl(base + NvRegPowerState2);
		powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
		if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 ||
		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) &&
		    pci_dev->revision >= 0xA3)
			powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
		writel(powerstate, base + NvRegPowerState2);
	}

	if (np->desc_ver == DESC_VER_1) {
		np->tx_flags = NV_TX_VALID;
	} else {
		np->tx_flags = NV_TX2_VALID;
	}
	if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
		np->irqmask = NVREG_IRQMASK_THROUGHPUT;
		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
			np->msi_flags |= 0x0003;
	} else {
		np->irqmask = NVREG_IRQMASK_CPU;
		if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
			np->msi_flags |= 0x0001;
	}

	if (id->driver_data & DEV_NEED_TIMERIRQ)
		np->irqmask |= NVREG_IRQ_TIMER;
	if (id->driver_data & DEV_NEED_LINKTIMER) {
		dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
		np->need_linktimer = 1;
		np->link_timeout = jiffies + LINK_TIMEOUT;
	} else {
		dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
		np->need_linktimer = 0;
	}

	/* clear phy state and temporarily halt phy interrupts */
	writel(0, base + NvRegMIIMask);
	phystate = readl(base + NvRegAdapterControl);
	if (phystate & NVREG_ADAPTCTL_RUNNING) {
		phystate_orig = 1;
		phystate &= ~NVREG_ADAPTCTL_RUNNING;
		writel(phystate, base + NvRegAdapterControl);
	}
	writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);

	if (id->driver_data & DEV_HAS_MGMT_UNIT) {
		/* management unit running on the mac? */
		if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) {
			np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST;
			dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n", pci_name(pci_dev), np->mac_in_use);
			for (i = 0; i < 5000; i++) {
				msleep(1);
				if (nv_mgmt_acquire_sema(dev)) {
					/* management unit setup the phy already? */
					if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
					    NVREG_XMITCTL_SYNC_PHY_INIT) {
						/* phy is inited by mgmt unit */
						phyinitialized = 1;
						dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n", pci_name(pci_dev));
					} else {
						/* we need to init the phy */
					}
					break;
				}
			}
		}
	}

	/* find a suitable phy */
	for (i = 1; i <= 32; i++) {
		int id1, id2;
		int phyaddr = i & 0x1F;

		spin_lock_irq(&np->lock);
		id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id1 < 0 || id1 == 0xffff)
			continue;
		spin_lock_irq(&np->lock);
		id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
		spin_unlock_irq(&np->lock);
		if (id2 < 0 || id2 == 0xffff)
			continue;

		np->phy_model = id2 & PHYID2_MODEL_MASK;
		id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
		id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
		dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
			pci_name(pci_dev), id1, id2, phyaddr);
		np->phyaddr = phyaddr;
		np->phy_oui = id1 | id2;
		break;
	}
	if (i == 33) {
		printk(KERN_INFO "%s: open: Could not find a valid PHY.\n",
		       pci_name(pci_dev));
		goto out_error;
	}

	if (!phyinitialized) {
		/* reset it */
		phy_init(dev);
	} else {
		/* see if it is a gigabit phy */
		u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
		if (mii_status & PHY_GIGABIT) {
			np->gigabit = PHY_GIGABIT;
		}
	}

	/* set default link speed settings */
	np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
	np->duplex = 0;
	np->autoneg = 1;

	err = register_netdev(dev);
	if (err) {
		printk(KERN_INFO "forcedeth: unable to register netdev: %d\n", err);
		goto out_error;
	}
	printk(KERN_INFO "%s: forcedeth.c: subsystem: %05x:%04x bound to %s\n",
			dev->name, pci_dev->subsystem_vendor, pci_dev->subsystem_device,
			pci_name(pci_dev));

	return 0;

out_error:
	if (phystate_orig)
		writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
	pci_set_drvdata(pci_dev, NULL);
out_freering:
	free_rings(dev);
out_unmap:
	iounmap(get_hwbase(dev));
out_relreg:
	pci_release_regions(pci_dev);
out_disable:
	pci_disable_device(pci_dev);
out_free:
	free_netdev(dev);
out:
	return err;
}
static void __devexit nv_remove(struct pci_dev *pci_dev)
{
	struct net_device *dev = pci_get_drvdata(pci_dev);
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	unregister_netdev(dev);

	/* special op: write back the misordered MAC address - otherwise
	 * the next nv_probe would see a wrong address.
	 */
	writel(np->orig_mac[0], base + NvRegMacAddrA);
	writel(np->orig_mac[1], base + NvRegMacAddrB);

	/* free all structures */
	free_rings(dev);
	iounmap(get_hwbase(dev));
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);
	free_netdev(dev);
	pci_set_drvdata(pci_dev, NULL);
}
#ifdef CONFIG_PM
static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct fe_priv *np = netdev_priv(dev);

	if (!netif_running(dev))
		goto out;

	netif_device_detach(dev);

	nv_close(dev);

	/* save non-pci configuration space */
	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
out:
	return 0;
}

static int nv_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	int rc = 0;

	if (!netif_running(dev))
		goto out;

	netif_device_attach(dev);

	/* restore non-pci configuration space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);

	rc = nv_open(dev);
out:
	return rc;
}
#else
#define nv_suspend NULL
#define nv_resume NULL
#endif /* CONFIG_PM */
static struct pci_device_id pci_tbl[] = {
	{	/* nForce Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce2 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* CK804 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
	},
	{	/* CK804 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
	},
	{	/* MCP04 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
	},
	{	/* MCP04 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1,
	},
	{	/* MCP51 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
	},
	{	/* MCP51 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1,
	},
	{	/* MCP55 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{	/* MCP55 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
	},
	{0,},
};
static struct pci_driver driver = {
	.name = "forcedeth",
	.id_table = pci_tbl,
	.probe = nv_probe,
	.remove = __devexit_p(nv_remove),
	.suspend = nv_suspend,
	.resume	= nv_resume,
};
static int __init init_nic(void)
{
	printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION);
	return pci_register_driver(&driver);
}

static void __exit exit_nic(void)
{
	pci_unregister_driver(&driver);
}
module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
module_param(optimization_mode, int, 0);
MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
module_param(poll_interval, int, 0);
MODULE_PARM_DESC(poll_interval, "Interval determines how frequent timer interrupt is generated by [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535.");
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(msix, int, 0);
MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(dma_64bit, int, 0);
MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
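
/* Example (illustrative): loading the module as
 *   modprobe forcedeth optimization_mode=1 poll_interval=100
 * selects timer-driven interrupts; working the formula from the parameter
 * description above backwards, a register value of 100 corresponds to
 * roughly 100 * 2^10 / 100 = 1024 microseconds (about 1 ms) between timer
 * interrupts. */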
MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
MODULE_LICENSE("GPL");

MODULE_DEVICE_TABLE(pci, pci_tbl);

module_init(init_nic);
module_exit(exit_nic);