/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 * engineered documentation written by Carl-Daniel Hailfinger
 * and Andrew de Quincey.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4,5 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *	IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004,2005,2006,2007,2008,2009 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if a rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define FORCEDETH_VERSION "0.64"
#define DRV_NAME "forcedeth"

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/prefetch.h>
#include <linux/io.h>

#include <asm/irq.h>
#include <asm/system.h>

#define TX_WORK_PER_LOOP 64
#define RX_WORK_PER_LOOP 64

/*
 * Hardware access:
 */

#define DEV_NEED_TIMERIRQ 0x0000001 /* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER 0x0000002 /* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC 0x0000004 /* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA 0x0000008 /* device supports 64bit dma */
#define DEV_HAS_CHECKSUM 0x0000010 /* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN 0x0000020 /* device supports vlan tagging and striping */
#define DEV_HAS_MSI 0x0000040 /* device supports MSI */
#define DEV_HAS_MSI_X 0x0000080 /* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL 0x0000100 /* device supports power savings */
#define DEV_HAS_STATISTICS_V1 0x0000200 /* device supports hw statistics version 1 */
#define DEV_HAS_STATISTICS_V2 0x0000400 /* device supports hw statistics version 2 */
#define DEV_HAS_STATISTICS_V3 0x0000800 /* device supports hw statistics version 3 */
#define DEV_HAS_STATISTICS_V12 0x0000600 /* device supports hw statistics version 1 and 2 */
#define DEV_HAS_STATISTICS_V123 0x0000e00 /* device supports hw statistics version 1, 2, and 3 */
#define DEV_HAS_TEST_EXTENDED 0x0001000 /* device supports extended diagnostic test */
#define DEV_HAS_MGMT_UNIT 0x0002000 /* device supports management unit */
#define DEV_HAS_CORRECT_MACADDR 0x0004000 /* device supports correct mac address order */
#define DEV_HAS_COLLISION_FIX 0x0008000 /* device supports tx collision fix */
#define DEV_HAS_PAUSEFRAME_TX_V1 0x0010000 /* device supports tx pause frames version 1 */
#define DEV_HAS_PAUSEFRAME_TX_V2 0x0020000 /* device supports tx pause frames version 2 */
#define DEV_HAS_PAUSEFRAME_TX_V3 0x0040000 /* device supports tx pause frames version 3 */
#define DEV_NEED_TX_LIMIT 0x0080000 /* device needs to limit tx */
#define DEV_NEED_TX_LIMIT2 0x0180000 /* device needs to limit tx, except for some revs */
#define DEV_HAS_GEAR_MODE 0x0200000 /* device supports gear mode */
#define DEV_NEED_PHY_INIT_FIX 0x0400000 /* device needs specific phy workaround */
#define DEV_NEED_LOW_POWER_FIX 0x0800000 /* device needs special power up workaround */
#define DEV_NEED_MSI_FIX 0x1000000 /* device needs msi workaround */

enum {
	NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT 0x040
#define NVREG_IRQSTAT_MASK 0x83ff
	NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR 0x0001
#define NVREG_IRQ_RX 0x0002
#define NVREG_IRQ_RX_NOBUF 0x0004
#define NVREG_IRQ_TX_ERR 0x0008
#define NVREG_IRQ_TX_OK 0x0010
#define NVREG_IRQ_TIMER 0x0020
#define NVREG_IRQ_LINK 0x0040
#define NVREG_IRQ_RX_FORCED 0x0080
#define NVREG_IRQ_TX_FORCED 0x0100
#define NVREG_IRQ_RECOVER_ERROR 0x8200
#define NVREG_IRQMASK_THROUGHPUT 0x00df
#define NVREG_IRQMASK_CPU 0x0060
#define NVREG_IRQ_TX_ALL (NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER (NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)

	NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL 3

/*
 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 */
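/*
 * For reference, combining this with the poll_interval formula further
 * down (value = (time_in_micro_secs * 100) / 2^10): one unit is
 * 2^10/100 us ~= 10.24 us, so 97 ~= 993 us (the 1 ms above), the
 * CPU-mode default of 13 ~= 133 us, and the throughput-mode backup
 * value of 65535 ~= 671 ms.
 */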
	NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT 65535 /* backup tx cleanup if loop max reached */
#define NVREG_POLL_DEFAULT_CPU 13
	NvRegMSIMap0 = 0x020,
	NvRegMSIMap1 = 0x024,
	NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED 0x01
	NvRegMisc1 = 0x080,
#define NVREG_MISC1_PAUSE_TX 0x01
#define NVREG_MISC1_HD 0x02
#define NVREG_MISC1_FORCE 0x3b0f3c

	NvRegMacReset = 0x34,
#define NVREG_MAC_RESET_ASSERT 0x0F3
	NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START 0x01
#define NVREG_XMITCTL_MGMT_ST 0x40000000
#define NVREG_XMITCTL_SYNC_MASK 0x000f0000
#define NVREG_XMITCTL_SYNC_NOT_READY 0x0
#define NVREG_XMITCTL_SYNC_PHY_INIT 0x00040000
#define NVREG_XMITCTL_MGMT_SEMA_MASK 0x00000f00
#define NVREG_XMITCTL_MGMT_SEMA_FREE 0x0
#define NVREG_XMITCTL_HOST_SEMA_MASK 0x0000f000
#define NVREG_XMITCTL_HOST_SEMA_ACQ 0x0000f000
#define NVREG_XMITCTL_HOST_LOADED 0x00004000
#define NVREG_XMITCTL_TX_PATH_EN 0x01000000
#define NVREG_XMITCTL_DATA_START 0x00100000
#define NVREG_XMITCTL_DATA_READY 0x00010000
#define NVREG_XMITCTL_DATA_ERROR 0x00020000
	NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY 0x01

	NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_PAUSE_RX 0x08
#define NVREG_PFF_ALWAYS 0x7F0000
#define NVREG_PFF_PROMISC 0x80
#define NVREG_PFF_MYADDR 0x20
#define NVREG_PFF_LOOPBACK 0x10

	NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY 0x601
#define NVREG_OFFLOAD_NORMAL RX_NIC_BUFSIZE
	NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START 0x01
#define NVREG_RCVCTL_RX_PATH_EN 0x01000000
	NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY 0x01

	NvRegSlotTime = 0x9c,
#define NVREG_SLOTTIME_LEGBF_ENABLED 0x80000000
#define NVREG_SLOTTIME_10_100_FULL 0x00007f00
#define NVREG_SLOTTIME_1000_FULL 0x0003ff00
#define NVREG_SLOTTIME_HALF 0x0000ff00
#define NVREG_SLOTTIME_DEFAULT 0x00007f00
#define NVREG_SLOTTIME_MASK 0x000000ff

	NvRegTxDeferral = 0xA0,
#define NVREG_TX_DEFERRAL_DEFAULT 0x15050f
#define NVREG_TX_DEFERRAL_RGMII_10_100 0x16070f
#define NVREG_TX_DEFERRAL_RGMII_1000 0x14050f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_10 0x16190f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_100 0x16300f
#define NVREG_TX_DEFERRAL_MII_STRETCH 0x152000
	NvRegRxDeferral = 0xA4,
#define NVREG_RX_DEFERRAL_DEFAULT 0x16
	NvRegMacAddrA = 0xA8,
	NvRegMacAddrB = 0xAC,
	NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE 0x01
	NvRegMulticastAddrB = 0xB4,
	NvRegMulticastMaskA = 0xB8,
#define NVREG_MCASTMASKA_NONE 0xffffffff
	NvRegMulticastMaskB = 0xBC,
#define NVREG_MCASTMASKB_NONE 0xffff

	NvRegPhyInterface = 0xC0,
#define PHY_RGMII 0x10000000
	NvRegBackOffControl = 0xC4,
#define NVREG_BKOFFCTRL_DEFAULT 0x70000000
#define NVREG_BKOFFCTRL_SEED_MASK 0x000003ff
#define NVREG_BKOFFCTRL_SELECT 24
#define NVREG_BKOFFCTRL_GEAR 12

	NvRegTxRingPhysAddr = 0x100,
	NvRegRxRingPhysAddr = 0x104,
	NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT 0
#define NVREG_RINGSZ_RXSHIFT 16
	NvRegTransmitPoll = 0x10c,
#define NVREG_TRANSMITPOLL_MAC_ADDR_REV 0x00008000
	NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE 0x10000
#define NVREG_LINKSPEED_10 1000
#define NVREG_LINKSPEED_100 100
#define NVREG_LINKSPEED_1000 50
#define NVREG_LINKSPEED_MASK (0xFFF)
	NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31 (1<<31)
	NvRegTxWatermark = 0x13c,
#define NVREG_TX_WM_DESC1_DEFAULT 0x0200010
#define NVREG_TX_WM_DESC2_3_DEFAULT 0x1e08000
#define NVREG_TX_WM_DESC2_3_1000 0xfe08000
	NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK 0x0001
#define NVREG_TXRXCTL_BIT1 0x0002
#define NVREG_TXRXCTL_BIT2 0x0004
#define NVREG_TXRXCTL_IDLE 0x0008
#define NVREG_TXRXCTL_RESET 0x0010
#define NVREG_TXRXCTL_RXCHECK 0x0400
#define NVREG_TXRXCTL_DESC_1 0
#define NVREG_TXRXCTL_DESC_2 0x002100
#define NVREG_TXRXCTL_DESC_3 0xc02200
#define NVREG_TXRXCTL_VLANSTRIP 0x00040
#define NVREG_TXRXCTL_VLANINS 0x00080
	NvRegTxRingPhysAddrHigh = 0x148,
	NvRegRxRingPhysAddrHigh = 0x14C,
	NvRegTxPauseFrame = 0x170,
#define NVREG_TX_PAUSEFRAME_DISABLE 0x0fff0080
#define NVREG_TX_PAUSEFRAME_ENABLE_V1 0x01800010
#define NVREG_TX_PAUSEFRAME_ENABLE_V2 0x056003f0
#define NVREG_TX_PAUSEFRAME_ENABLE_V3 0x09f00880
	NvRegTxPauseFrameLimit = 0x174,
#define NVREG_TX_PAUSEFRAMELIMIT_ENABLE 0x00010000
	NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR 0x0001
#define NVREG_MIISTAT_LINKCHANGE 0x0008
#define NVREG_MIISTAT_MASK_RW 0x0007
#define NVREG_MIISTAT_MASK_ALL 0x000f
	NvRegMIIMask = 0x184,
#define NVREG_MII_LINKCHANGE 0x0008

	NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START 0x02
#define NVREG_ADAPTCTL_LINKUP 0x04
#define NVREG_ADAPTCTL_PHYVALID 0x40000
#define NVREG_ADAPTCTL_RUNNING 0x100000
#define NVREG_ADAPTCTL_PHYSHIFT 24
	NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8 (1<<8)
#define NVREG_MIIDELAY 5
	NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE 0x08000
#define NVREG_MIICTL_WRITE 0x00400
#define NVREG_MIICTL_ADDRSHIFT 5
	NvRegMIIData = 0x194,
	NvRegTxUnicast = 0x1a0,
	NvRegTxMulticast = 0x1a4,
	NvRegTxBroadcast = 0x1a8,
	NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL 0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT 24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT 16
#define NVREG_WAKEUPFLAGS_D3SHIFT 12
#define NVREG_WAKEUPFLAGS_D2SHIFT 8
#define NVREG_WAKEUPFLAGS_D1SHIFT 4
#define NVREG_WAKEUPFLAGS_D0SHIFT 0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT 0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT 0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE 0x04
#define NVREG_WAKEUPFLAGS_ENABLE 0x1111

	NvRegMgmtUnitGetVersion = 0x204,
#define NVREG_MGMTUNITGETVERSION 0x01
	NvRegMgmtUnitVersion = 0x208,
#define NVREG_MGMTUNITVERSION 0x08
	NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP (1<<30)
#define NVREG_POWERCAP_D2SUPP (1<<26)
#define NVREG_POWERCAP_D1SUPP (1<<25)
	NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP 0x8000
#define NVREG_POWERSTATE_VALID 0x0100
#define NVREG_POWERSTATE_MASK 0x0003
#define NVREG_POWERSTATE_D0 0x0000
#define NVREG_POWERSTATE_D1 0x0001
#define NVREG_POWERSTATE_D2 0x0002
#define NVREG_POWERSTATE_D3 0x0003
	NvRegMgmtUnitControl = 0x278,
#define NVREG_MGMTUNITCONTROL_INUSE 0x20000
	NvRegTxCnt = 0x280,
	NvRegTxZeroReXmt = 0x284,
	NvRegTxOneReXmt = 0x288,
	NvRegTxManyReXmt = 0x28c,
	NvRegTxLateCol = 0x290,
	NvRegTxUnderflow = 0x294,
	NvRegTxLossCarrier = 0x298,
	NvRegTxExcessDef = 0x29c,
	NvRegTxRetryErr = 0x2a0,
	NvRegRxFrameErr = 0x2a4,
	NvRegRxExtraByte = 0x2a8,
	NvRegRxLateCol = 0x2ac,
	NvRegRxRunt = 0x2b0,
	NvRegRxFrameTooLong = 0x2b4,
	NvRegRxOverflow = 0x2b8,
	NvRegRxFCSErr = 0x2bc,
	NvRegRxFrameAlignErr = 0x2c0,
	NvRegRxLenErr = 0x2c4,
	NvRegRxUnicast = 0x2c8,
	NvRegRxMulticast = 0x2cc,
	NvRegRxBroadcast = 0x2d0,
	NvRegTxDef = 0x2d4,
	NvRegTxFrame = 0x2d8,
	NvRegRxCnt = 0x2dc,
	NvRegTxPause = 0x2e0,
	NvRegRxPause = 0x2e4,
	NvRegRxDropFrame = 0x2e8,
	NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE 0x2000
	NvRegMSIXMap0 = 0x3e0,
	NvRegMSIXMap1 = 0x3e4,
	NvRegMSIXIrqStatus = 0x3f0,

	NvRegPowerState2 = 0x600,
#define NVREG_POWERSTATE2_POWERUP_MASK 0x0F15
#define NVREG_POWERSTATE2_POWERUP_REV_A3 0x0001
#define NVREG_POWERSTATE2_PHY_RESET 0x0004
#define NVREG_POWERSTATE2_GATE_CLOCKS 0x0F00
};

/* Big endian: should work, but is untested */
struct ring_desc {
	__le32 buf;
	__le32 flaglen;
};

struct ring_desc_ex {
	__le32 bufhigh;
	__le32 buflow;
	__le32 txvlan;
	__le32 flaglen;
};

union ring_type {
	struct ring_desc *orig;
	struct ring_desc_ex *ex;
};

#define FLAG_MASK_V1 0xffff0000
#define FLAG_MASK_V2 0xffffc000
#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)
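
/*
 * For reference, these work out to LEN_MASK_V1 = 0x0000ffff (a 16-bit
 * length field) and LEN_MASK_V2 = 0x00003fff (a 14-bit length field,
 * matching NV_TX2_TSO_SHIFT below).
 */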

#define NV_TX_LASTPACKET (1<<16)
#define NV_TX_RETRYERROR (1<<19)
#define NV_TX_RETRYCOUNT_MASK (0xF<<20)
#define NV_TX_FORCED_INTERRUPT (1<<24)
#define NV_TX_DEFERRED (1<<26)
#define NV_TX_CARRIERLOST (1<<27)
#define NV_TX_LATECOLLISION (1<<28)
#define NV_TX_UNDERFLOW (1<<29)
#define NV_TX_ERROR (1<<30)
#define NV_TX_VALID (1<<31)

#define NV_TX2_LASTPACKET (1<<29)
#define NV_TX2_RETRYERROR (1<<18)
#define NV_TX2_RETRYCOUNT_MASK (0xF<<19)
#define NV_TX2_FORCED_INTERRUPT (1<<30)
#define NV_TX2_DEFERRED (1<<25)
#define NV_TX2_CARRIERLOST (1<<26)
#define NV_TX2_LATECOLLISION (1<<27)
#define NV_TX2_UNDERFLOW (1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR (1<<30)
#define NV_TX2_VALID (1<<31)
#define NV_TX2_TSO (1<<28)
#define NV_TX2_TSO_SHIFT 14
#define NV_TX2_TSO_MAX_SHIFT 14
#define NV_TX2_TSO_MAX_SIZE (1<<NV_TX2_TSO_MAX_SHIFT)
#define NV_TX2_CHECKSUM_L3 (1<<27)
#define NV_TX2_CHECKSUM_L4 (1<<26)

#define NV_TX3_VLAN_TAG_PRESENT (1<<18)

#define NV_RX_DESCRIPTORVALID (1<<16)
#define NV_RX_MISSEDFRAME (1<<17)
#define NV_RX_SUBSTRACT1 (1<<18)
#define NV_RX_ERROR1 (1<<23)
#define NV_RX_ERROR2 (1<<24)
#define NV_RX_ERROR3 (1<<25)
#define NV_RX_ERROR4 (1<<26)
#define NV_RX_CRCERR (1<<27)
#define NV_RX_OVERFLOW (1<<28)
#define NV_RX_FRAMINGERR (1<<29)
#define NV_RX_ERROR (1<<30)
#define NV_RX_AVAIL (1<<31)
#define NV_RX_ERROR_MASK (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4|NV_RX_CRCERR|NV_RX_OVERFLOW|NV_RX_FRAMINGERR)

#define NV_RX2_CHECKSUMMASK (0x1C000000)
#define NV_RX2_CHECKSUM_IP (0x10000000)
#define NV_RX2_CHECKSUM_IP_TCP (0x14000000)
#define NV_RX2_CHECKSUM_IP_UDP (0x18000000)
#define NV_RX2_DESCRIPTORVALID (1<<29)
#define NV_RX2_SUBSTRACT1 (1<<25)
#define NV_RX2_ERROR1 (1<<18)
#define NV_RX2_ERROR2 (1<<19)
#define NV_RX2_ERROR3 (1<<20)
#define NV_RX2_ERROR4 (1<<21)
#define NV_RX2_CRCERR (1<<22)
#define NV_RX2_OVERFLOW (1<<23)
#define NV_RX2_FRAMINGERR (1<<24)
/* error and avail are the same for both */
#define NV_RX2_ERROR (1<<30)
#define NV_RX2_AVAIL (1<<31)
#define NV_RX2_ERROR_MASK (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4|NV_RX2_CRCERR|NV_RX2_OVERFLOW|NV_RX2_FRAMINGERR)

#define NV_RX3_VLAN_TAG_PRESENT (1<<16)
#define NV_RX3_VLAN_TAG_MASK (0x0000FFFF)

/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ_VER1 0x270
#define NV_PCI_REGSZ_VER2 0x2d4
#define NV_PCI_REGSZ_VER3 0x604
#define NV_PCI_REGSZ_MAX 0x604

/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY 4
#define NV_TXSTOP_DELAY1 10
#define NV_TXSTOP_DELAY1MAX 500000
#define NV_TXSTOP_DELAY2 100
#define NV_RXSTOP_DELAY1 10
#define NV_RXSTOP_DELAY1MAX 500000
#define NV_RXSTOP_DELAY2 100
#define NV_SETUP5_DELAY 5
#define NV_SETUP5_DELAYMAX 50000
#define NV_POWERUP_DELAY 5
#define NV_POWERUP_DELAYMAX 5000
#define NV_MIIBUSY_DELAY 50
#define NV_MIIPHY_DELAY 10
#define NV_MIIPHY_DELAYMAX 10000
#define NV_MAC_RESET_DELAY 64

#define NV_WAKEUPPATTERNS 5
#define NV_WAKEUPMASKENTRIES 4

/* General driver defaults */
#define NV_WATCHDOG_TIMEO (5*HZ)

#define RX_RING_DEFAULT 512
#define TX_RING_DEFAULT 256
#define RX_RING_MIN 128
#define TX_RING_MIN 64
#define RING_MAX_DESC_VER_1 1024
#define RING_MAX_DESC_VER_2_3 16384

/* rx/tx mac addr + type + vlan + align + slack */
#define NV_RX_HEADERS (64)
/* even more slack. */
#define NV_RX_ALLOC_PAD (64)

/* maximum mtu size */
#define NV_PKTLIMIT_1 ETH_DATA_LEN /* hard limit not known */
#define NV_PKTLIMIT_2 9100 /* Actual limit according to NVidia: 9202 */

#define OOM_REFILL (1+HZ/20)
#define POLL_WAIT (1+HZ/100)
#define LINK_TIMEOUT (3*HZ)
#define STATS_INTERVAL (10*HZ)

/*
 * desc_ver values:
 * The nic supports three different descriptor types:
 * - DESC_VER_1: Original
 * - DESC_VER_2: support for jumbo frames.
 * - DESC_VER_3: 64-bit format.
 */
#define DESC_VER_1 1
#define DESC_VER_2 2
#define DESC_VER_3 3

/* PHY defines */
#define PHY_OUI_MARVELL 0x5043
#define PHY_OUI_CICADA 0x03f1
#define PHY_OUI_VITESSE 0x01c1
#define PHY_OUI_REALTEK 0x0732
#define PHY_OUI_REALTEK2 0x0020
#define PHYID1_OUI_MASK 0x03ff
#define PHYID1_OUI_SHFT 6
#define PHYID2_OUI_MASK 0xfc00
#define PHYID2_OUI_SHFT 10
#define PHYID2_MODEL_MASK 0x03f0
#define PHY_MODEL_REALTEK_8211 0x0110
#define PHY_REV_MASK 0x0001
#define PHY_REV_REALTEK_8211B 0x0000
#define PHY_REV_REALTEK_8211C 0x0001
#define PHY_MODEL_REALTEK_8201 0x0200
#define PHY_MODEL_MARVELL_E3016 0x0220
#define PHY_MARVELL_E3016_INITMASK 0x0300
#define PHY_CICADA_INIT1 0x0f000
#define PHY_CICADA_INIT2 0x0e00
#define PHY_CICADA_INIT3 0x01000
#define PHY_CICADA_INIT4 0x0200
#define PHY_CICADA_INIT5 0x0004
#define PHY_CICADA_INIT6 0x02000
#define PHY_VITESSE_INIT_REG1 0x1f
#define PHY_VITESSE_INIT_REG2 0x10
#define PHY_VITESSE_INIT_REG3 0x11
#define PHY_VITESSE_INIT_REG4 0x12
#define PHY_VITESSE_INIT_MSK1 0xc
#define PHY_VITESSE_INIT_MSK2 0x0180
#define PHY_VITESSE_INIT1 0x52b5
#define PHY_VITESSE_INIT2 0xaf8a
#define PHY_VITESSE_INIT3 0x8
#define PHY_VITESSE_INIT4 0x8f8a
#define PHY_VITESSE_INIT5 0xaf86
#define PHY_VITESSE_INIT6 0x8f86
#define PHY_VITESSE_INIT7 0xaf82
#define PHY_VITESSE_INIT8 0x0100
#define PHY_VITESSE_INIT9 0x8f82
#define PHY_VITESSE_INIT10 0x0
#define PHY_REALTEK_INIT_REG1 0x1f
#define PHY_REALTEK_INIT_REG2 0x19
#define PHY_REALTEK_INIT_REG3 0x13
#define PHY_REALTEK_INIT_REG4 0x14
#define PHY_REALTEK_INIT_REG5 0x18
#define PHY_REALTEK_INIT_REG6 0x11
#define PHY_REALTEK_INIT_REG7 0x01
#define PHY_REALTEK_INIT1 0x0000
#define PHY_REALTEK_INIT2 0x8e00
#define PHY_REALTEK_INIT3 0x0001
#define PHY_REALTEK_INIT4 0xad17
#define PHY_REALTEK_INIT5 0xfb54
#define PHY_REALTEK_INIT6 0xf5c7
#define PHY_REALTEK_INIT7 0x1000
#define PHY_REALTEK_INIT8 0x0003
#define PHY_REALTEK_INIT9 0x0008
#define PHY_REALTEK_INIT10 0x0005
#define PHY_REALTEK_INIT11 0x0200
#define PHY_REALTEK_INIT_MSK1 0x0003

#define PHY_GIGABIT 0x0100

#define PHY_TIMEOUT 0x1
#define PHY_ERROR 0x2

#define PHY_100 0x1
#define PHY_1000 0x2
#define PHY_HALF 0x100

#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
#define NV_PAUSEFRAME_RX_ENABLE 0x0004
#define NV_PAUSEFRAME_TX_ENABLE 0x0008
#define NV_PAUSEFRAME_RX_REQ 0x0010
#define NV_PAUSEFRAME_TX_REQ 0x0020
#define NV_PAUSEFRAME_AUTONEG 0x0040

/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS 8
#define NV_MSI_X_VECTORS_MASK 0x000f
#define NV_MSI_CAPABLE 0x0010
#define NV_MSI_X_CAPABLE 0x0020
#define NV_MSI_ENABLED 0x0040
#define NV_MSI_X_ENABLED 0x0080

#define NV_MSI_X_VECTOR_ALL 0x0
#define NV_MSI_X_VECTOR_RX 0x0
#define NV_MSI_X_VECTOR_TX 0x1
#define NV_MSI_X_VECTOR_OTHER 0x2

#define NV_MSI_PRIV_OFFSET 0x68
#define NV_MSI_PRIV_VALUE 0xffffffff

#define NV_RESTART_TX 0x1
#define NV_RESTART_RX 0x2

#define NV_TX_LIMIT_COUNT 16

#define NV_DYNAMIC_THRESHOLD 4
#define NV_DYNAMIC_MAX_QUIET_COUNT 2048

/* statistics */
struct nv_ethtool_str {
	char name[ETH_GSTRING_LEN];
};

static const struct nv_ethtool_str nv_estats_str[] = {
	{ "tx_bytes" },
	{ "tx_zero_rexmt" },
	{ "tx_one_rexmt" },
	{ "tx_many_rexmt" },
	{ "tx_late_collision" },
	{ "tx_fifo_errors" },
	{ "tx_carrier_errors" },
	{ "tx_excess_deferral" },
	{ "tx_retry_error" },
	{ "rx_frame_error" },
	{ "rx_extra_byte" },
	{ "rx_late_collision" },
	{ "rx_runt" },
	{ "rx_frame_too_long" },
	{ "rx_over_errors" },
	{ "rx_crc_errors" },
	{ "rx_frame_align_error" },
	{ "rx_length_error" },
	{ "rx_unicast" },
	{ "rx_multicast" },
	{ "rx_broadcast" },
	{ "rx_packets" },
	{ "rx_errors_total" },
	{ "tx_errors_total" },

	/* version 2 stats */
	{ "tx_deferral" },
	{ "tx_packets" },
	{ "rx_bytes" },
	{ "tx_pause" },
	{ "rx_pause" },
	{ "rx_drop_frame" },

	/* version 3 stats */
	{ "tx_unicast" },
	{ "tx_multicast" },
	{ "tx_broadcast" }
};

struct nv_ethtool_stats {
	u64 tx_bytes;
	u64 tx_zero_rexmt;
	u64 tx_one_rexmt;
	u64 tx_many_rexmt;
	u64 tx_late_collision;
	u64 tx_fifo_errors;
	u64 tx_carrier_errors;
	u64 tx_excess_deferral;
	u64 tx_retry_error;
	u64 rx_frame_error;
	u64 rx_extra_byte;
	u64 rx_late_collision;
	u64 rx_runt;
	u64 rx_frame_too_long;
	u64 rx_over_errors;
	u64 rx_crc_errors;
	u64 rx_frame_align_error;
	u64 rx_length_error;
	u64 rx_unicast;
	u64 rx_multicast;
	u64 rx_broadcast;
	u64 rx_packets;
	u64 rx_errors_total;
	u64 tx_errors_total;

	/* version 2 stats */
	u64 tx_deferral;
	u64 tx_packets;
	u64 rx_bytes;
	u64 tx_pause;
	u64 rx_pause;
	u64 rx_drop_frame;

	/* version 3 stats */
	u64 tx_unicast;
	u64 tx_multicast;
	u64 tx_broadcast;
};

#define NV_DEV_STATISTICS_V3_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
#define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3)
#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
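
/*
 * For reference: struct nv_ethtool_stats holds 33 u64 counters (24 base,
 * 6 version-2, 3 version-3), so the counts work out to V3 = 33,
 * V2 = 33 - 3 = 30 and V1 = 30 - 6 = 24, matching the groups in
 * nv_estats_str[] above.
 */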

/* diagnostics */
#define NV_TEST_COUNT_BASE 3
#define NV_TEST_COUNT_EXTENDED 4

static const struct nv_ethtool_str nv_etests_str[] = {
	{ "link      (online/offline)" },
	{ "register  (offline)       " },
	{ "interrupt (offline)       " },
	{ "loopback  (offline)       " }
};

struct register_test {
	__u32 reg;
	__u32 mask;
};

static const struct register_test nv_registers_test[] = {
	{ NvRegUnknownSetupReg6, 0x01 },
	{ NvRegMisc1, 0x03c },
	{ NvRegOffloadConfig, 0x03ff },
	{ NvRegMulticastAddrA, 0xffffffff },
	{ NvRegTxWatermark, 0x0ff },
	{ NvRegWakeUpFlags, 0x07777 },
	{ 0, 0 }
};

struct nv_skb_map {
	struct sk_buff *skb;
	dma_addr_t dma;
	unsigned int dma_len:31;
	unsigned int dma_single:1;
	struct ring_desc_ex *first_tx_desc;
	struct nv_skb_map *next_tx_ctx;
};

/*
 * SMP locking:
 * All hardware access under netdev_priv(dev)->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *   by the arch code for interrupts.
 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
 *   needs netdev_priv(dev)->lock :-(
 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
 */

/* in dev: base, irq */
struct fe_priv {
	spinlock_t lock;

	struct net_device *dev;
	struct napi_struct napi;

	/* General data:
	 * Locking: spin_lock(&np->lock); */
	struct nv_ethtool_stats estats;
	int in_shutdown;
	u32 linkspeed;
	int duplex;
	int autoneg;
	int fixed_mode;
	int phyaddr;
	int wolenabled;
	unsigned int phy_oui;
	unsigned int phy_model;
	unsigned int phy_rev;
	u16 gigabit;
	int intr_test;
	int recover_error;
	int quiet_count;

	/* General data: RO fields */
	dma_addr_t ring_addr;
	struct pci_dev *pci_dev;
	u32 orig_mac[2];
	u32 events;
	u32 irqmask;
	u32 desc_ver;
	u32 txrxctl_bits;
	u32 vlanctl_bits;
	u32 driver_data;
	u32 device_id;
	u32 register_size;
	u32 mac_in_use;
	int mgmt_version;
	int mgmt_sema;

	void __iomem *base;
	/* rx specific fields.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	union ring_type get_rx, put_rx, first_rx, last_rx;
	struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
	struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
	struct nv_skb_map *rx_skb;

	union ring_type rx_ring;
	unsigned int rx_buf_sz;
	unsigned int pkt_limit;
	struct timer_list oom_kick;
	struct timer_list nic_poll;
	struct timer_list stats_poll;
	u32 nic_poll_irq;
	int rx_ring_size;
	/* media detection workaround.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	int need_linktimer;
	unsigned long link_timeout;
	/*
	 * tx specific fields.
	 */
	union ring_type get_tx, put_tx, first_tx, last_tx;
	struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
	struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
	struct nv_skb_map *tx_skb;

	union ring_type tx_ring;
	u32 tx_flags;
	int tx_ring_size;
	int tx_limit;
	u32 tx_pkts_in_progress;
	struct nv_skb_map *tx_change_owner;
	struct nv_skb_map *tx_end_flip;
	int tx_stop;

	/* msi/msi-x fields */
	u32 msi_flags;
	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];

	/* flow control */
	u32 pause_flags;

	/* power saved state */
	u32 saved_config_space[NV_PCI_REGSZ_MAX/4];

	/* for different msi-x irq type */
	char name_rx[IFNAMSIZ + 3];	/* -rx    */
	char name_tx[IFNAMSIZ + 3];	/* -tx    */
	char name_other[IFNAMSIZ + 6];	/* -other */
};

/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work = 4;
/*
 * Optimization can be throughput mode, cpu mode or dynamic mode.
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 * Dynamic Mode: Switches between throughput and cpu mode based on load.
 */
enum {
	NV_OPTIMIZATION_MODE_THROUGHPUT,
	NV_OPTIMIZATION_MODE_CPU,
	NV_OPTIMIZATION_MODE_DYNAMIC
};
static int optimization_mode = NV_OPTIMIZATION_MODE_DYNAMIC;
/*
 * Poll interval for timer irq
 *
 * This interval determines how frequently an interrupt is generated.
 * Its value is determined by [(time_in_micro_secs * 100) / (2^10)]
 * Min = 0, and Max = 65535
 */
static int poll_interval = -1;

/*
 * MSI interrupts
 */
enum {
	NV_MSI_INT_DISABLED,
	NV_MSI_INT_ENABLED
};
static int msi = NV_MSI_INT_ENABLED;

/*
 * MSIX interrupts
 */
enum {
	NV_MSIX_INT_DISABLED,
	NV_MSIX_INT_ENABLED
};
static int msix = NV_MSIX_INT_ENABLED;

/*
 * DMA 64bit
 */
enum {
	NV_DMA_64BIT_DISABLED,
	NV_DMA_64BIT_ENABLED
};
static int dma_64bit = NV_DMA_64BIT_ENABLED;

/*
 * Crossover Detection
 * Realtek 8201 phy + some OEM boards do not work properly.
 */
enum {
	NV_CROSSOVER_DETECTION_DISABLED,
	NV_CROSSOVER_DETECTION_ENABLED
};
static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;

/*
 * Power down phy when interface is down (persists through reboot;
 * older Linux and other OSes may not power it up again)
 */
static int phy_power_down;

static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
	return ((struct fe_priv *)netdev_priv(dev))->base;
}

static inline void pci_push(u8 __iomem *base)
{
	/* force out pending posted writes */
	readl(base);
}

static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen)
		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}

static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
}
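/* DESC_VER_1 and DESC_VER_2 use the two-word ring_desc layout ("orig");
 * DESC_VER_3 uses the four-word ring_desc_ex layout ("ex") and the
 * "optimized" code paths below.
 */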
static bool nv_optimized(struct fe_priv *np)
{
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		return false;
	return true;
}

static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
		     int delay, int delaymax)
{
	u8 __iomem *base = get_hwbase(dev);

	pci_push(base);
	do {
		udelay(delay);
		delaymax -= delay;
		if (delaymax < 0)
			return 1;
	} while ((readl(base + offset) & mask) != target);
	return 0;
}

#define NV_SETUP_RX_RING 0x01
#define NV_SETUP_TX_RING 0x02

static inline u32 dma_low(dma_addr_t addr)
{
	return addr;
}

static inline u32 dma_high(dma_addr_t addr)
{
	return addr>>31>>1;	/* 0 if 32bit, shift down by 32 if 64bit */
}
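
/* Note on dma_high(): the double shift avoids undefined behaviour. When
 * dma_addr_t is a 32-bit type, a single ">> 32" would shift by the full
 * width of the type; ">>31>>1" is well defined and yields 0 there.
 */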

static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (!nv_optimized(np)) {
		if (rxtx_flags & NV_SETUP_RX_RING)
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
		if (rxtx_flags & NV_SETUP_TX_RING)
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
	} else {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
			writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
			writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh);
		}
	}
}

static void free_rings(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!nv_optimized(np)) {
		if (np->rx_ring.orig)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.orig, np->ring_addr);
	} else {
		if (np->rx_ring.ex)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.ex, np->ring_addr);
	}
	kfree(np->rx_skb);
	kfree(np->tx_skb);
}
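/* True only when MSI-X is enabled with more than one vector; with legacy,
 * MSI, or single-vector MSI-X interrupts a single handler services rx,
 * tx and link events.
 */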
static int using_multi_irqs(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
		return 0;
	else
		return 1;
}

static void nv_txrx_gate(struct net_device *dev, bool gate)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 powerstate;

	if (!np->mac_in_use &&
	    (np->driver_data & DEV_HAS_POWER_CNTRL)) {
		powerstate = readl(base + NvRegPowerState2);
		if (gate)
			powerstate |= NVREG_POWERSTATE2_GATE_CLOCKS;
		else
			powerstate &= ~NVREG_POWERSTATE2_GATE_CLOCKS;
		writel(powerstate, base + NvRegPowerState2);
	}
}

static void nv_enable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(np->pci_dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

static void nv_disable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(np->pci_dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}

/* In MSIX mode, a write to irqmask behaves as XOR */
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
{
	u8 __iomem *base = get_hwbase(dev);

	writel(mask, base + NvRegIrqMask);
}

static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		writel(mask, base + NvRegIrqMask);
	} else {
		if (np->msi_flags & NV_MSI_ENABLED)
			writel(0, base + NvRegMSIIrqMask);
		writel(0, base + NvRegIrqMask);
	}
}

static void nv_napi_enable(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	napi_enable(&np->napi);
}

static void nv_napi_disable(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	napi_disable(&np->napi);
}

#define MII_READ	(-1)
/* mii_rw: read/write a register on the PHY.
 *
 * Caller must guarantee serialization
 */
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	int retval;

	writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus);

	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	}

	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	}
	writel(reg, base + NvRegMIIControl);

	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
		      NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX)) {
		retval = -1;
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		retval = 0;
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		retval = -1;
	} else {
		retval = readl(base + NvRegMIIData);
	}

	return retval;
}
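
/*
 * Illustrative use of mii_rw() (a sketch with a hypothetical caller, not
 * code from this file): a PHY OUI can be reassembled from the two PHY ID
 * registers using the PHYID1/PHYID2 masks defined above:
 *
 *	int id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
 *	int id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
 *	u32 oui = ((id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT) |
 *		  ((id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT);
 */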

static int phy_reset(struct net_device *dev, u32 bmcr_setup)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 miicontrol;
	unsigned int tries = 0;

	miicontrol = BMCR_RESET | bmcr_setup;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol))
		return -1;

	/* wait for 500ms */
	msleep(500);

	/* must wait till reset is deasserted */
	while (miicontrol & BMCR_RESET) {
		usleep_range(10000, 20000);
		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		/* FIXME: 100 tries seem excessive */
		if (tries++ > 100)
			return -1;
	}
	return 0;
}

static int init_realtek_8211b(struct net_device *dev, struct fe_priv *np)
{
	static const struct {
		int reg;
		int init;
	} ri[] = {
		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
		{ PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2 },
		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3 },
		{ PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4 },
		{ PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5 },
		{ PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6 },
		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(ri); i++) {
		if (mii_rw(dev, np->phyaddr, ri[i].reg, ri[i].init))
			return PHY_ERROR;
	}

	return 0;
}

static int init_realtek_8211c(struct net_device *dev, struct fe_priv *np)
{
	u32 reg;
	u8 __iomem *base = get_hwbase(dev);
	u32 powerstate = readl(base + NvRegPowerState2);

	/* need to perform hw phy reset */
	powerstate |= NVREG_POWERSTATE2_PHY_RESET;
	writel(powerstate, base + NvRegPowerState2);
	msleep(25);

	powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
	writel(powerstate, base + NvRegPowerState2);
	msleep(25);

	reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
	reg |= PHY_REALTEK_INIT9;
	if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10))
		return PHY_ERROR;
	reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
	if (!(reg & PHY_REALTEK_INIT11)) {
		reg |= PHY_REALTEK_INIT11;
		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg))
			return PHY_ERROR;
	}
	if (mii_rw(dev, np->phyaddr,
		   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
		return PHY_ERROR;

	return 0;
}

static int init_realtek_8201(struct net_device *dev, struct fe_priv *np)
{
	u32 phy_reserved;

	if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
		phy_reserved = mii_rw(dev, np->phyaddr,
				      PHY_REALTEK_INIT_REG6, MII_READ);
		phy_reserved |= PHY_REALTEK_INIT7;
		if (mii_rw(dev, np->phyaddr,
			   PHY_REALTEK_INIT_REG6, phy_reserved))
			return PHY_ERROR;
	}

	return 0;
}

static int init_realtek_8201_cross(struct net_device *dev, struct fe_priv *np)
{
	u32 phy_reserved;

	if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
		if (mii_rw(dev, np->phyaddr,
			   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3))
			return PHY_ERROR;
		phy_reserved = mii_rw(dev, np->phyaddr,
				      PHY_REALTEK_INIT_REG2, MII_READ);
		phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
		phy_reserved |= PHY_REALTEK_INIT3;
		if (mii_rw(dev, np->phyaddr,
			   PHY_REALTEK_INIT_REG2, phy_reserved))
			return PHY_ERROR;
		if (mii_rw(dev, np->phyaddr,
			   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
			return PHY_ERROR;
	}

	return 0;
}

static int init_cicada(struct net_device *dev, struct fe_priv *np,
		       u32 phyinterface)
{
	u32 phy_reserved;

	if (phyinterface & PHY_RGMII) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
		phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
		phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved))
			return PHY_ERROR;
		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		phy_reserved |= PHY_CICADA_INIT5;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved))
			return PHY_ERROR;
	}
	phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
	phy_reserved |= PHY_CICADA_INIT6;
	if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved))
		return PHY_ERROR;

	return 0;
}

static int init_vitesse(struct net_device *dev, struct fe_priv *np)
{
	u32 phy_reserved;

	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG4, MII_READ);
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG3, MII_READ);
	phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
	phy_reserved |= PHY_VITESSE_INIT3;
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG4, MII_READ);
	phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
	phy_reserved |= PHY_VITESSE_INIT3;
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG3, MII_READ);
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG4, MII_READ);
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
		return PHY_ERROR;
	phy_reserved = mii_rw(dev, np->phyaddr,
			      PHY_VITESSE_INIT_REG3, MII_READ);
	phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
	phy_reserved |= PHY_VITESSE_INIT8;
	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9))
		return PHY_ERROR;
	if (mii_rw(dev, np->phyaddr,
		   PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10))
		return PHY_ERROR;

	return 0;
}

static int phy_init(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 phyinterface;
	u32 mii_status, mii_control, mii_control_1000, reg;

	/* phy errata for E3016 phy */
	if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
		reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		reg &= ~PHY_MARVELL_E3016_INITMASK;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
			netdev_info(dev, "%s: phy write to errata reg failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_REALTEK) {
		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
		    np->phy_rev == PHY_REV_REALTEK_8211B) {
			if (init_realtek_8211b(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		} else if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
			   np->phy_rev == PHY_REV_REALTEK_8211C) {
			if (init_realtek_8211c(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		} else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
			if (init_realtek_8201(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		}
	}

	/* set advertise register */
	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	reg |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
		ADVERTISE_100HALF | ADVERTISE_100FULL |
		ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
		netdev_info(dev, "%s: phy write to advertise failed\n",
			    pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* get phy interface type */
	phyinterface = readl(base + NvRegPhyInterface);

	/* see if gigabit phy */
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	if (mii_status & PHY_GIGABIT) {
		np->gigabit = PHY_GIGABIT;
		mii_control_1000 = mii_rw(dev, np->phyaddr,
					  MII_CTRL1000, MII_READ);
		mii_control_1000 &= ~ADVERTISE_1000HALF;
		if (phyinterface & PHY_RGMII)
			mii_control_1000 |= ADVERTISE_1000FULL;
		else
			mii_control_1000 &= ~ADVERTISE_1000FULL;

		if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
			netdev_info(dev, "%s: phy init failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else
		np->gigabit = 0;

	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= BMCR_ANENABLE;

	if (np->phy_oui == PHY_OUI_REALTEK &&
	    np->phy_model == PHY_MODEL_REALTEK_8211 &&
	    np->phy_rev == PHY_REV_REALTEK_8211C) {
		/* start autoneg since we already performed hw reset above */
		mii_control |= BMCR_ANRESTART;
		if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
			netdev_info(dev, "%s: phy init failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else {
		/* reset the phy
		 * (certain phys need bmcr to be setup with reset)
		 */
		if (phy_reset(dev, mii_control)) {
			netdev_info(dev, "%s: phy reset failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}

	/* phy vendor specific configuration */
	if (np->phy_oui == PHY_OUI_CICADA) {
		if (init_cicada(dev, np, phyinterface)) {
			netdev_info(dev, "%s: phy init failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else if (np->phy_oui == PHY_OUI_VITESSE) {
		if (init_vitesse(dev, np)) {
			netdev_info(dev, "%s: phy init failed\n",
				    pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else if (np->phy_oui == PHY_OUI_REALTEK) {
		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
		    np->phy_rev == PHY_REV_REALTEK_8211B) {
			/* reset could have cleared these out, set them back */
			if (init_realtek_8211b(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		} else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
			if (init_realtek_8201(dev, np) ||
			    init_realtek_8201_cross(dev, np)) {
				netdev_info(dev, "%s: phy init failed\n",
					    pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		}
	}

	/* some phys clear out pause advertisement on reset, set it back */
	mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);

	/* restart auto negotiation, power down phy */
	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
	if (phy_power_down)
		mii_control |= BMCR_PDOWN;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control))
		return PHY_ERROR;

	return 0;
}

static void nv_start_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	/* Already running? Stop it. */
	if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
		rx_ctrl &= ~NVREG_RCVCTL_START;
		writel(rx_ctrl, base + NvRegReceiverControl);
		pci_push(base);
	}
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);
	rx_ctrl |= NVREG_RCVCTL_START;
	if (np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	pci_push(base);
}

static void nv_stop_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	if (!np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_START;
	else
		rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	if (reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
		      NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX))
		netdev_info(dev, "%s: ReceiverStatus remained busy\n",
			    __func__);

	udelay(NV_RXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(0, base + NvRegLinkSpeed);
}

static void nv_start_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	tx_ctrl |= NVREG_XMITCTL_START;
	if (np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	pci_push(base);
}

static void nv_stop_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	if (!np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_START;
	else
		tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	if (reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
		      NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX))
		netdev_info(dev, "%s: TransmitterStatus remained busy\n",
			    __func__);

	udelay(NV_TXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
		       base + NvRegTransmitPoll);
}

static void nv_start_rxtx(struct net_device *dev)
{
	nv_start_rx(dev);
	nv_start_tx(dev);
}

static void nv_stop_rxtx(struct net_device *dev)
{
	nv_stop_rx(dev);
	nv_stop_tx(dev);
}

static void nv_txrx_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	udelay(NV_TXRX_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}

static void nv_mac_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 temp1, temp2, temp3;

	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);

	/* save registers since they will be cleared on reset */
	temp1 = readl(base + NvRegMacAddrA);
	temp2 = readl(base + NvRegMacAddrB);
	temp3 = readl(base + NvRegTransmitPoll);

	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(0, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);

	/* restore saved registers */
	writel(temp1, base + NvRegMacAddrA);
	writel(temp2, base + NvRegMacAddrB);
	writel(temp3, base + NvRegTransmitPoll);

	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}

static void nv_get_hw_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	np->estats.tx_bytes += readl(base + NvRegTxCnt);
	np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
	np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
	np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
	np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
	np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
	np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
	np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
	np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
	np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
	np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
	np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
	np->estats.rx_runt += readl(base + NvRegRxRunt);
	np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
	np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
	np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
	np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
	np->estats.rx_length_error += readl(base + NvRegRxLenErr);
	np->estats.rx_unicast += readl(base + NvRegRxUnicast);
	np->estats.rx_multicast += readl(base + NvRegRxMulticast);
	np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
	np->estats.rx_packets =
		np->estats.rx_unicast +
		np->estats.rx_multicast +
		np->estats.rx_broadcast;
	np->estats.rx_errors_total =
		np->estats.rx_crc_errors +
		np->estats.rx_over_errors +
		np->estats.rx_frame_error +
		(np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
		np->estats.rx_late_collision +
		np->estats.rx_runt +
		np->estats.rx_frame_too_long;
	np->estats.tx_errors_total =
		np->estats.tx_late_collision +
		np->estats.tx_fifo_errors +
		np->estats.tx_carrier_errors +
		np->estats.tx_excess_deferral +
		np->estats.tx_retry_error;

	if (np->driver_data & DEV_HAS_STATISTICS_V2) {
		np->estats.tx_deferral += readl(base + NvRegTxDef);
		np->estats.tx_packets += readl(base + NvRegTxFrame);
		np->estats.rx_bytes += readl(base + NvRegRxCnt);
		np->estats.tx_pause += readl(base + NvRegTxPause);
		np->estats.rx_pause += readl(base + NvRegRxPause);
		np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
		np->estats.rx_errors_total += np->estats.rx_drop_frame;
	}

	if (np->driver_data & DEV_HAS_STATISTICS_V3) {
		np->estats.tx_unicast += readl(base + NvRegTxUnicast);
		np->estats.tx_multicast += readl(base + NvRegTxMulticast);
		np->estats.tx_broadcast += readl(base + NvRegTxBroadcast);
	}
}

/*
 * nv_get_stats: dev->get_stats function
 * Get latest stats value from the nic.
 * Called with read_lock(&dev_base_lock) held for read -
 * only synchronized against unregister_netdevice.
 */
static struct net_device_stats *nv_get_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	/* If the nic supports hw counters then retrieve latest values */
	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) {
		nv_get_hw_stats(dev);

		/* copy to net_device stats */
		dev->stats.tx_packets = np->estats.tx_packets;
		dev->stats.rx_bytes = np->estats.rx_bytes;
		dev->stats.tx_bytes = np->estats.tx_bytes;
		dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
		dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
		dev->stats.rx_crc_errors = np->estats.rx_crc_errors;
		dev->stats.rx_over_errors = np->estats.rx_over_errors;
		dev->stats.rx_fifo_errors = np->estats.rx_drop_frame;
		dev->stats.rx_errors = np->estats.rx_errors_total;
		dev->stats.tx_errors = np->estats.tx_errors_total;
	}

	return &dev->stats;
}
1725 /*
1726 * nv_alloc_rx: fill rx ring entries.
1727 * Return 1 if the skb allocations failed and the
1728 * rx engine is left without available descriptors.
1729 */
1730 static int nv_alloc_rx(struct net_device *dev)
1731 {
1732 struct fe_priv *np = netdev_priv(dev);
1733 struct ring_desc *less_rx;
1734
1735 less_rx = np->get_rx.orig;
1736 if (less_rx-- == np->first_rx.orig)
1737 less_rx = np->last_rx.orig;
1738
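	/* Fill the ring only up to one slot before get_rx: leaving one
	 * entry permanently empty keeps "put == get" unambiguous as the
	 * empty-ring condition. */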
1739 while (np->put_rx.orig != less_rx) {
1740 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
1741 if (skb) {
1742 np->put_rx_ctx->skb = skb;
1743 np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
1744 skb->data,
1745 skb_tailroom(skb),
1746 PCI_DMA_FROMDEVICE);
1747 np->put_rx_ctx->dma_len = skb_tailroom(skb);
1748 np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
1749 wmb();
1750 np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
1751 if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
1752 np->put_rx.orig = np->first_rx.orig;
1753 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1754 np->put_rx_ctx = np->first_rx_ctx;
1755 } else
1756 return 1;
1757 }
1758 return 0;
1759 }
1760
1761 static int nv_alloc_rx_optimized(struct net_device *dev)
1762 {
1763 struct fe_priv *np = netdev_priv(dev);
1764 struct ring_desc_ex *less_rx;
1765
1766 less_rx = np->get_rx.ex;
1767 if (less_rx-- == np->first_rx.ex)
1768 less_rx = np->last_rx.ex;
1769
1770 while (np->put_rx.ex != less_rx) {
1771 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
1772 if (skb) {
1773 np->put_rx_ctx->skb = skb;
1774 np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
1775 skb->data,
1776 skb_tailroom(skb),
1777 PCI_DMA_FROMDEVICE);
1778 np->put_rx_ctx->dma_len = skb_tailroom(skb);
1779 np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma));
1780 np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma));
1781 wmb();
1782 np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
1783 if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
1784 np->put_rx.ex = np->first_rx.ex;
1785 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1786 np->put_rx_ctx = np->first_rx_ctx;
1787 } else
1788 return 1;
1789 }
1790 return 0;
1791 }
1792
1793 /* If rx buffers are exhausted, this timer fires after 50ms to attempt a refill */
1794 static void nv_do_rx_refill(unsigned long data)
1795 {
1796 struct net_device *dev = (struct net_device *) data;
1797 struct fe_priv *np = netdev_priv(dev);
1798
1799 /* Just reschedule NAPI rx processing */
1800 napi_schedule(&np->napi);
1801 }
1802
1803 static void nv_init_rx(struct net_device *dev)
1804 {
1805 struct fe_priv *np = netdev_priv(dev);
1806 int i;
1807
1808 np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
1809
1810 if (!nv_optimized(np))
1811 np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
1812 else
1813 np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
1814 np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
1815 np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];
1816
1817 for (i = 0; i < np->rx_ring_size; i++) {
1818 if (!nv_optimized(np)) {
1819 np->rx_ring.orig[i].flaglen = 0;
1820 np->rx_ring.orig[i].buf = 0;
1821 } else {
1822 np->rx_ring.ex[i].flaglen = 0;
1823 np->rx_ring.ex[i].txvlan = 0;
1824 np->rx_ring.ex[i].bufhigh = 0;
1825 np->rx_ring.ex[i].buflow = 0;
1826 }
1827 np->rx_skb[i].skb = NULL;
1828 np->rx_skb[i].dma = 0;
1829 }
1830 }
1831
1832 static void nv_init_tx(struct net_device *dev)
1833 {
1834 struct fe_priv *np = netdev_priv(dev);
1835 int i;
1836
1837 np->get_tx = np->put_tx = np->first_tx = np->tx_ring;
1838
1839 if (!nv_optimized(np))
1840 np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
1841 else
1842 np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
1843 np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
1844 np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
1845 np->tx_pkts_in_progress = 0;
1846 np->tx_change_owner = NULL;
1847 np->tx_end_flip = NULL;
1848 np->tx_stop = 0;
1849
1850 for (i = 0; i < np->tx_ring_size; i++) {
1851 if (!nv_optimized(np)) {
1852 np->tx_ring.orig[i].flaglen = 0;
1853 np->tx_ring.orig[i].buf = 0;
1854 } else {
1855 np->tx_ring.ex[i].flaglen = 0;
1856 np->tx_ring.ex[i].txvlan = 0;
1857 np->tx_ring.ex[i].bufhigh = 0;
1858 np->tx_ring.ex[i].buflow = 0;
1859 }
1860 np->tx_skb[i].skb = NULL;
1861 np->tx_skb[i].dma = 0;
1862 np->tx_skb[i].dma_len = 0;
1863 np->tx_skb[i].dma_single = 0;
1864 np->tx_skb[i].first_tx_desc = NULL;
1865 np->tx_skb[i].next_tx_ctx = NULL;
1866 }
1867 }
1868
1869 static int nv_init_ring(struct net_device *dev)
1870 {
1871 struct fe_priv *np = netdev_priv(dev);
1872
1873 nv_init_tx(dev);
1874 nv_init_rx(dev);
1875
1876 if (!nv_optimized(np))
1877 return nv_alloc_rx(dev);
1878 else
1879 return nv_alloc_rx_optimized(dev);
1880 }
1881
1882 static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
1883 {
1884 if (tx_skb->dma) {
1885 if (tx_skb->dma_single)
1886 pci_unmap_single(np->pci_dev, tx_skb->dma,
1887 tx_skb->dma_len,
1888 PCI_DMA_TODEVICE);
1889 else
1890 pci_unmap_page(np->pci_dev, tx_skb->dma,
1891 tx_skb->dma_len,
1892 PCI_DMA_TODEVICE);
1893 tx_skb->dma = 0;
1894 }
1895 }
1896
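/* Unmap a tx slot and free its skb, if any.  Returns 1 when an skb was
 * attached (so nv_drain_tx() can account it as dropped) and 0 for slots
 * that only carried a fragment mapping. */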
1897 static int nv_release_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
1898 {
1899 nv_unmap_txskb(np, tx_skb);
1900 if (tx_skb->skb) {
1901 dev_kfree_skb_any(tx_skb->skb);
1902 tx_skb->skb = NULL;
1903 return 1;
1904 }
1905 return 0;
1906 }
1907
1908 static void nv_drain_tx(struct net_device *dev)
1909 {
1910 struct fe_priv *np = netdev_priv(dev);
1911 unsigned int i;
1912
1913 for (i = 0; i < np->tx_ring_size; i++) {
1914 if (!nv_optimized(np)) {
1915 np->tx_ring.orig[i].flaglen = 0;
1916 np->tx_ring.orig[i].buf = 0;
1917 } else {
1918 np->tx_ring.ex[i].flaglen = 0;
1919 np->tx_ring.ex[i].txvlan = 0;
1920 np->tx_ring.ex[i].bufhigh = 0;
1921 np->tx_ring.ex[i].buflow = 0;
1922 }
1923 if (nv_release_txskb(np, &np->tx_skb[i]))
1924 dev->stats.tx_dropped++;
1925 np->tx_skb[i].dma = 0;
1926 np->tx_skb[i].dma_len = 0;
1927 np->tx_skb[i].dma_single = 0;
1928 np->tx_skb[i].first_tx_desc = NULL;
1929 np->tx_skb[i].next_tx_ctx = NULL;
1930 }
1931 np->tx_pkts_in_progress = 0;
1932 np->tx_change_owner = NULL;
1933 np->tx_end_flip = NULL;
1934 }
1935
1936 static void nv_drain_rx(struct net_device *dev)
1937 {
1938 struct fe_priv *np = netdev_priv(dev);
1939 int i;
1940
1941 for (i = 0; i < np->rx_ring_size; i++) {
1942 if (!nv_optimized(np)) {
1943 np->rx_ring.orig[i].flaglen = 0;
1944 np->rx_ring.orig[i].buf = 0;
1945 } else {
1946 np->rx_ring.ex[i].flaglen = 0;
1947 np->rx_ring.ex[i].txvlan = 0;
1948 np->rx_ring.ex[i].bufhigh = 0;
1949 np->rx_ring.ex[i].buflow = 0;
1950 }
1951 wmb();
1952 if (np->rx_skb[i].skb) {
1953 pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
1954 (skb_end_pointer(np->rx_skb[i].skb) -
1955 np->rx_skb[i].skb->data),
1956 PCI_DMA_FROMDEVICE);
1957 dev_kfree_skb(np->rx_skb[i].skb);
1958 np->rx_skb[i].skb = NULL;
1959 }
1960 }
1961 }
1962
1963 static void nv_drain_rxtx(struct net_device *dev)
1964 {
1965 nv_drain_tx(dev);
1966 nv_drain_rx(dev);
1967 }
1968
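/* Number of free tx descriptors: (put_tx_ctx - get_tx_ctx) is the count
 * of in-flight slots (a pointer difference over the tx_skb array);
 * adding tx_ring_size before the modulo keeps the value non-negative
 * when put has wrapped behind get. */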
1969 static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
1970 {
1971 return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
1972 }
1973
1974 static void nv_legacybackoff_reseed(struct net_device *dev)
1975 {
1976 u8 __iomem *base = get_hwbase(dev);
1977 u32 reg;
1978 u32 low;
1979 int tx_status = 0;
1980
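	/* Write a fresh random seed into the slot-time register; this
	 * appears to reseed the LFSR the nic uses for collision backoff.
	 * Gear-mode capable nics use nv_gear_backoff_reseed() instead. */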
1981 reg = readl(base + NvRegSlotTime) & ~NVREG_SLOTTIME_MASK;
1982 get_random_bytes(&low, sizeof(low));
1983 reg |= low & NVREG_SLOTTIME_MASK;
1984
1985 /* Need to stop tx before the change takes effect.
1986 * Caller has already acquired np->lock.
1987 */
1988 tx_status = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START;
1989 if (tx_status)
1990 nv_stop_tx(dev);
1991 nv_stop_rx(dev);
1992 writel(reg, base + NvRegSlotTime);
1993 if (tx_status)
1994 nv_start_tx(dev);
1995 nv_start_rx(dev);
1996 }
1997
1998 /* Gear Backoff Seeds */
1999 #define BACKOFF_SEEDSET_ROWS 8
2000 #define BACKOFF_SEEDSET_LFSRS 15
2001
2002 /* Known Good seed sets */
2003 static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
2004 {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
2005 {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974},
2006 {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
2007 {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974},
2008 {266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984},
2009 {266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984},
2010 {366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800, 84},
2011 {466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184} };
2012
2013 static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
2014 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
2015 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2016 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397},
2017 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
2018 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
2019 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2020 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2021 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395} };
2022
2023 static void nv_gear_backoff_reseed(struct net_device *dev)
2024 {
2025 u8 __iomem *base = get_hwbase(dev);
2026 u32 miniseed1, miniseed2, miniseed2_reversed, miniseed3, miniseed3_reversed;
2027 u32 temp, seedset, combinedSeed;
2028 int i;
2029
2030 /* Setup seed for free running LFSR */
2031 /* We gather three 12-bit random values via get_random_bytes()
2032 and swizzle bits around to increase randomness */
2033 get_random_bytes(&miniseed1, sizeof(miniseed1));
2034 miniseed1 &= 0x0fff;
2035 if (miniseed1 == 0)
2036 miniseed1 = 0xabc;
2037
2038 get_random_bytes(&miniseed2, sizeof(miniseed2));
2039 miniseed2 &= 0x0fff;
2040 if (miniseed2 == 0)
2041 miniseed2 = 0xabc;
2042 miniseed2_reversed =
2043 ((miniseed2 & 0xF00) >> 8) |
2044 (miniseed2 & 0x0F0) |
2045 ((miniseed2 & 0x00F) << 8);
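	/* swap the high and low nibbles of the 12-bit miniseed; the
	 * middle nibble stays in place (miniseed3 gets the same
	 * treatment below) */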
2046
2047 get_random_bytes(&miniseed3, sizeof(miniseed3));
2048 miniseed3 &= 0x0fff;
2049 if (miniseed3 == 0)
2050 miniseed3 = 0xabc;
2051 miniseed3_reversed =
2052 ((miniseed3 & 0xF00) >> 8) |
2053 (miniseed3 & 0x0F0) |
2054 ((miniseed3 & 0x00F) << 8);
2055
2056 combinedSeed = ((miniseed1 ^ miniseed2_reversed) << 12) |
2057 (miniseed2 ^ miniseed3_reversed);
2058
2059 /* Seeds can not be zero */
2060 if ((combinedSeed & NVREG_BKOFFCTRL_SEED_MASK) == 0)
2061 combinedSeed |= 0x08;
2062 if ((combinedSeed & (NVREG_BKOFFCTRL_SEED_MASK << NVREG_BKOFFCTRL_GEAR)) == 0)
2063 combinedSeed |= 0x8000;
2064
2065 /* No need to disable tx here */
2066 temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT);
2067 temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK;
2068 temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR;
2069 writel(temp, base + NvRegBackOffControl);
2070
2071 /* Setup seeds for all gear LFSRs. */
2072 get_random_bytes(&seedset, sizeof(seedset));
2073 seedset = seedset % BACKOFF_SEEDSET_ROWS;
2074 for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) {
2075 temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT);
2076 temp |= main_seedset[seedset][i-1] & 0x3ff;
2077 temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR);
2078 writel(temp, base + NvRegBackOffControl);
2079 }
2080 }
2081
2082 /*
2083 * nv_start_xmit: dev->hard_start_xmit function
2084 * Called with netif_tx_lock held.
2085 */
2086 static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2087 {
2088 struct fe_priv *np = netdev_priv(dev);
2089 u32 tx_flags = 0;
2090 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
2091 unsigned int fragments = skb_shinfo(skb)->nr_frags;
2092 unsigned int i;
2093 u32 offset = 0;
2094 u32 bcnt;
2095 u32 size = skb_headlen(skb);
2096 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2097 u32 empty_slots;
2098 struct ring_desc *put_tx;
2099 struct ring_desc *start_tx;
2100 struct ring_desc *prev_tx;
2101 struct nv_skb_map *prev_tx_ctx;
2102 unsigned long flags;
2103
2104 /* add fragments to entries count */
2105 for (i = 0; i < fragments; i++) {
2106 u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2107
2108 entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) +
2109 ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2110 }
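	/* entries is now ceil(len / NV_TX2_TSO_MAX_SIZE) summed over the
	 * linear area and every fragment, i.e. the number of tx
	 * descriptors this skb will consume. */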
2111
2112 spin_lock_irqsave(&np->lock, flags);
2113 empty_slots = nv_get_empty_tx_slots(np);
2114 if (unlikely(empty_slots <= entries)) {
2115 netif_stop_queue(dev);
2116 np->tx_stop = 1;
2117 spin_unlock_irqrestore(&np->lock, flags);
2118 return NETDEV_TX_BUSY;
2119 }
2120 spin_unlock_irqrestore(&np->lock, flags);
2121
2122 start_tx = put_tx = np->put_tx.orig;
2123
2124 /* setup the header buffer */
2125 do {
2126 prev_tx = put_tx;
2127 prev_tx_ctx = np->put_tx_ctx;
2128 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2129 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
2130 PCI_DMA_TODEVICE);
2131 np->put_tx_ctx->dma_len = bcnt;
2132 np->put_tx_ctx->dma_single = 1;
2133 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
2134 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2135
2136 tx_flags = np->tx_flags;
2137 offset += bcnt;
2138 size -= bcnt;
2139 if (unlikely(put_tx++ == np->last_tx.orig))
2140 put_tx = np->first_tx.orig;
2141 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2142 np->put_tx_ctx = np->first_tx_ctx;
2143 } while (size);
2144
2145 /* setup the fragments */
2146 for (i = 0; i < fragments; i++) {
2147 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2148 u32 frag_size = skb_frag_size(frag);
2149 offset = 0;
2150
2151 do {
2152 prev_tx = put_tx;
2153 prev_tx_ctx = np->put_tx_ctx;
2154 bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
2155 np->put_tx_ctx->dma = skb_frag_dma_map(
2156 &np->pci_dev->dev,
2157 frag, offset,
2158 bcnt,
2159 DMA_TO_DEVICE);
2160 np->put_tx_ctx->dma_len = bcnt;
2161 np->put_tx_ctx->dma_single = 0;
2162 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
2163 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2164
2165 offset += bcnt;
2166 frag_size -= bcnt;
2167 if (unlikely(put_tx++ == np->last_tx.orig))
2168 put_tx = np->first_tx.orig;
2169 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2170 np->put_tx_ctx = np->first_tx_ctx;
2171 } while (frag_size);
2172 }
2173
2174 /* set last fragment flag */
2175 prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);
2176
2177 /* save skb in this slot's context area */
2178 prev_tx_ctx->skb = skb;
2179
2180 if (skb_is_gso(skb))
2181 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
2182 else
2183 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
2184 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
2185
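	/* Ordering note (inferred from the code): the first descriptor is
	 * written with tx_flags == 0, so its VALID bit - presumably part
	 * of np->tx_flags - is only ORed in below, under np->lock; the
	 * nic never sees a partially built chain. */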
2186 spin_lock_irqsave(&np->lock, flags);
2187
2188 /* set tx flags */
2189 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
2190 np->put_tx.orig = put_tx;
2191
2192 spin_unlock_irqrestore(&np->lock, flags);
2193
2194 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2195 return NETDEV_TX_OK;
2196 }
2197
2198 static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
2199 struct net_device *dev)
2200 {
2201 struct fe_priv *np = netdev_priv(dev);
2202 u32 tx_flags = 0;
2203 u32 tx_flags_extra;
2204 unsigned int fragments = skb_shinfo(skb)->nr_frags;
2205 unsigned int i;
2206 u32 offset = 0;
2207 u32 bcnt;
2208 u32 size = skb_headlen(skb);
2209 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2210 u32 empty_slots;
2211 struct ring_desc_ex *put_tx;
2212 struct ring_desc_ex *start_tx;
2213 struct ring_desc_ex *prev_tx;
2214 struct nv_skb_map *prev_tx_ctx;
2215 struct nv_skb_map *start_tx_ctx;
2216 unsigned long flags;
2217
2218 /* add fragments to entries count */
2219 for (i = 0; i < fragments; i++) {
2220 u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2221
2222 entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) +
2223 ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2224 }
2225
2226 spin_lock_irqsave(&np->lock, flags);
2227 empty_slots = nv_get_empty_tx_slots(np);
2228 if (unlikely(empty_slots <= entries)) {
2229 netif_stop_queue(dev);
2230 np->tx_stop = 1;
2231 spin_unlock_irqrestore(&np->lock, flags);
2232 return NETDEV_TX_BUSY;
2233 }
2234 spin_unlock_irqrestore(&np->lock, flags);
2235
2236 start_tx = put_tx = np->put_tx.ex;
2237 start_tx_ctx = np->put_tx_ctx;
2238
2239 /* setup the header buffer */
2240 do {
2241 prev_tx = put_tx;
2242 prev_tx_ctx = np->put_tx_ctx;
2243 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2244 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
2245 PCI_DMA_TODEVICE);
2246 np->put_tx_ctx->dma_len = bcnt;
2247 np->put_tx_ctx->dma_single = 1;
2248 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
2249 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
2250 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2251
2252 tx_flags = NV_TX2_VALID;
2253 offset += bcnt;
2254 size -= bcnt;
2255 if (unlikely(put_tx++ == np->last_tx.ex))
2256 put_tx = np->first_tx.ex;
2257 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2258 np->put_tx_ctx = np->first_tx_ctx;
2259 } while (size);
2260
2261 /* setup the fragments */
2262 for (i = 0; i < fragments; i++) {
2263 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2264 u32 frag_size = skb_frag_size(frag);
2265 offset = 0;
2266
2267 do {
2268 prev_tx = put_tx;
2269 prev_tx_ctx = np->put_tx_ctx;
2270 bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
2271 np->put_tx_ctx->dma = skb_frag_dma_map(
2272 &np->pci_dev->dev,
2273 frag, offset,
2274 bcnt,
2275 DMA_TO_DEVICE);
2276 np->put_tx_ctx->dma_len = bcnt;
2277 np->put_tx_ctx->dma_single = 0;
2278 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
2279 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
2280 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2281
2282 offset += bcnt;
2283 frag_size -= bcnt;
2284 if (unlikely(put_tx++ == np->last_tx.ex))
2285 put_tx = np->first_tx.ex;
2286 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2287 np->put_tx_ctx = np->first_tx_ctx;
2288 } while (frag_size);
2289 }
2290
2291 /* set last fragment flag */
2292 prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);
2293
2294 /* save skb in this slot's context area */
2295 prev_tx_ctx->skb = skb;
2296
2297 if (skb_is_gso(skb))
2298 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
2299 else
2300 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
2301 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
2302
2303 /* vlan tag */
2304 if (vlan_tx_tag_present(skb))
2305 start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT |
2306 vlan_tx_tag_get(skb));
2307 else
2308 start_tx->txvlan = 0;
2309
2310 spin_lock_irqsave(&np->lock, flags);
2311
2312 if (np->tx_limit) {
2313 /* Limit the number of outstanding tx: set up all fragments, but
2314 * do not set the VALID bit on the first descriptor.  Save a pointer
2315 * to that descriptor and to the next skb_map element.
2316 */
2317
2318 if (np->tx_pkts_in_progress == NV_TX_LIMIT_COUNT) {
2319 if (!np->tx_change_owner)
2320 np->tx_change_owner = start_tx_ctx;
2321
2322 /* remove VALID bit */
2323 tx_flags &= ~NV_TX2_VALID;
2324 start_tx_ctx->first_tx_desc = start_tx;
2325 start_tx_ctx->next_tx_ctx = np->put_tx_ctx;
2326 np->tx_end_flip = np->put_tx_ctx;
2327 } else {
2328 np->tx_pkts_in_progress++;
2329 }
2330 }
2331
2332 /* set tx flags */
2333 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
2334 np->put_tx.ex = put_tx;
2335
2336 spin_unlock_irqrestore(&np->lock, flags);
2337
2338 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2339 return NETDEV_TX_OK;
2340 }
2341
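/* tx_limit deferral (see nv_start_xmit_optimized()): once
 * NV_TX_LIMIT_COUNT packets are outstanding, new chains are queued with
 * the VALID bit stripped.  Each completion below hands one deferred
 * chain back to the hardware by restoring VALID and kicking the
 * transmitter. */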
2342 static inline void nv_tx_flip_ownership(struct net_device *dev)
2343 {
2344 struct fe_priv *np = netdev_priv(dev);
2345
2346 np->tx_pkts_in_progress--;
2347 if (np->tx_change_owner) {
2348 np->tx_change_owner->first_tx_desc->flaglen |=
2349 cpu_to_le32(NV_TX2_VALID);
2350 np->tx_pkts_in_progress++;
2351
2352 np->tx_change_owner = np->tx_change_owner->next_tx_ctx;
2353 if (np->tx_change_owner == np->tx_end_flip)
2354 np->tx_change_owner = NULL;
2355
2356 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2357 }
2358 }
2359
2360 /*
2361 * nv_tx_done: check for completed packets, release the skbs.
2362 *
2363 * Caller must own np->lock.
2364 */
2365 static int nv_tx_done(struct net_device *dev, int limit)
2366 {
2367 struct fe_priv *np = netdev_priv(dev);
2368 u32 flags;
2369 int tx_work = 0;
2370 struct ring_desc *orig_get_tx = np->get_tx.orig;
2371
2372 while ((np->get_tx.orig != np->put_tx.orig) &&
2373 !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) &&
2374 (tx_work < limit)) {
2375
2376 nv_unmap_txskb(np, np->get_tx_ctx);
2377
2378 if (np->desc_ver == DESC_VER_1) {
2379 if (flags & NV_TX_LASTPACKET) {
2380 if (flags & NV_TX_ERROR) {
2381 if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK))
2382 nv_legacybackoff_reseed(dev);
2383 }
2384 dev_kfree_skb_any(np->get_tx_ctx->skb);
2385 np->get_tx_ctx->skb = NULL;
2386 tx_work++;
2387 }
2388 } else {
2389 if (flags & NV_TX2_LASTPACKET) {
2390 if (flags & NV_TX2_ERROR) {
2391 if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK))
2392 nv_legacybackoff_reseed(dev);
2393 }
2394 dev_kfree_skb_any(np->get_tx_ctx->skb);
2395 np->get_tx_ctx->skb = NULL;
2396 tx_work++;
2397 }
2398 }
2399 if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
2400 np->get_tx.orig = np->first_tx.orig;
2401 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
2402 np->get_tx_ctx = np->first_tx_ctx;
2403 }
2404 if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
2405 np->tx_stop = 0;
2406 netif_wake_queue(dev);
2407 }
2408 return tx_work;
2409 }
2410
2411 static int nv_tx_done_optimized(struct net_device *dev, int limit)
2412 {
2413 struct fe_priv *np = netdev_priv(dev);
2414 u32 flags;
2415 int tx_work = 0;
2416 struct ring_desc_ex *orig_get_tx = np->get_tx.ex;
2417
2418 while ((np->get_tx.ex != np->put_tx.ex) &&
2419 !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) &&
2420 (tx_work < limit)) {
2421
2422 nv_unmap_txskb(np, np->get_tx_ctx);
2423
2424 if (flags & NV_TX2_LASTPACKET) {
2425 if (flags & NV_TX2_ERROR) {
2426 if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
2427 if (np->driver_data & DEV_HAS_GEAR_MODE)
2428 nv_gear_backoff_reseed(dev);
2429 else
2430 nv_legacybackoff_reseed(dev);
2431 }
2432 }
2433
2434 dev_kfree_skb_any(np->get_tx_ctx->skb);
2435 np->get_tx_ctx->skb = NULL;
2436 tx_work++;
2437
2438 if (np->tx_limit)
2439 nv_tx_flip_ownership(dev);
2440 }
2441 if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
2442 np->get_tx.ex = np->first_tx.ex;
2443 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
2444 np->get_tx_ctx = np->first_tx_ctx;
2445 }
2446 if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
2447 np->tx_stop = 0;
2448 netif_wake_queue(dev);
2449 }
2450 return tx_work;
2451 }
2452
2453 /*
2454 * nv_tx_timeout: dev->tx_timeout function
2455 * Called with netif_tx_lock held.
2456 */
2457 static void nv_tx_timeout(struct net_device *dev)
2458 {
2459 struct fe_priv *np = netdev_priv(dev);
2460 u8 __iomem *base = get_hwbase(dev);
2461 u32 status;
2462 union ring_type put_tx;
2463 int saved_tx_limit;
2464 int i;
2465
2466 if (np->msi_flags & NV_MSI_X_ENABLED)
2467 status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
2468 else
2469 status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
2470
2471 netdev_info(dev, "Got tx_timeout. irq: %08x\n", status);
2472
2473 netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr);
2474 netdev_info(dev, "Dumping tx registers\n");
2475 for (i = 0; i <= np->register_size; i += 32) {
2476 netdev_info(dev,
2477 "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
2478 i,
2479 readl(base + i + 0), readl(base + i + 4),
2480 readl(base + i + 8), readl(base + i + 12),
2481 readl(base + i + 16), readl(base + i + 20),
2482 readl(base + i + 24), readl(base + i + 28));
2483 }
2484 netdev_info(dev, "Dumping tx ring\n");
2485 for (i = 0; i < np->tx_ring_size; i += 4) {
2486 if (!nv_optimized(np)) {
2487 netdev_info(dev,
2488 "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
2489 i,
2490 le32_to_cpu(np->tx_ring.orig[i].buf),
2491 le32_to_cpu(np->tx_ring.orig[i].flaglen),
2492 le32_to_cpu(np->tx_ring.orig[i+1].buf),
2493 le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
2494 le32_to_cpu(np->tx_ring.orig[i+2].buf),
2495 le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
2496 le32_to_cpu(np->tx_ring.orig[i+3].buf),
2497 le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
2498 } else {
2499 netdev_info(dev,
2500 "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
2501 i,
2502 le32_to_cpu(np->tx_ring.ex[i].bufhigh),
2503 le32_to_cpu(np->tx_ring.ex[i].buflow),
2504 le32_to_cpu(np->tx_ring.ex[i].flaglen),
2505 le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
2506 le32_to_cpu(np->tx_ring.ex[i+1].buflow),
2507 le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
2508 le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
2509 le32_to_cpu(np->tx_ring.ex[i+2].buflow),
2510 le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
2511 le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
2512 le32_to_cpu(np->tx_ring.ex[i+3].buflow),
2513 le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
2514 }
2515 }
2516
2517 spin_lock_irq(&np->lock);
2518
2519 /* 1) stop tx engine */
2520 nv_stop_tx(dev);
2521
2522 /* 2) complete any outstanding tx and do not give HW any limited tx pkts */
2523 saved_tx_limit = np->tx_limit;
2524 np->tx_limit = 0; /* prevent giving HW any limited pkts */
2525 np->tx_stop = 0; /* prevent waking tx queue */
2526 if (!nv_optimized(np))
2527 nv_tx_done(dev, np->tx_ring_size);
2528 else
2529 nv_tx_done_optimized(dev, np->tx_ring_size);
2530
2531 /* save current HW position */
2532 if (np->tx_change_owner)
2533 put_tx.ex = np->tx_change_owner->first_tx_desc;
2534 else
2535 put_tx = np->put_tx;
2536
2537 /* 3) clear all tx state */
2538 nv_drain_tx(dev);
2539 nv_init_tx(dev);
2540
2541 /* 4) restore state to current HW position */
2542 np->get_tx = np->put_tx = put_tx;
2543 np->tx_limit = saved_tx_limit;
2544
2545 /* 5) restart tx engine */
2546 nv_start_tx(dev);
2547 netif_wake_queue(dev);
2548 spin_unlock_irq(&np->lock);
2549 }
2550
2551 /*
2552 * Called when the nic notices a mismatch between the actual data len on the
2553 * wire and the len indicated in the 802 header
2554 */
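/*
 * Worked example (illustrative only): if the proto field claims 100
 * bytes of payload, protolen becomes 114 after adding ETH_HLEN.  A 120
 * byte frame is then trimmed to 114, while a 100 byte frame is shorter
 * than its own header claims and is rejected with -1.
 */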
2555 static int nv_getlen(struct net_device *dev, void *packet, int datalen)
2556 {
2557 int hdrlen; /* length of the 802 header */
2558 int protolen; /* length as stored in the proto field */
2559
2560 /* 1) calculate len according to header */
2561 if (((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
2562 protolen = ntohs(((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto);
2563 hdrlen = VLAN_HLEN;
2564 } else {
2565 protolen = ntohs(((struct ethhdr *)packet)->h_proto);
2566 hdrlen = ETH_HLEN;
2567 }
2568 if (protolen > ETH_DATA_LEN)
2569 return datalen; /* Value in proto field not a len, no checks possible */
2570
2571 protolen += hdrlen;
2572 /* consistency checks: */
2573 if (datalen > ETH_ZLEN) {
2574 if (datalen >= protolen) {
2575 /* more data on wire than in 802 header, trim off
2576 * the additional data.
2577 */
2578 return protolen;
2579 } else {
2580 /* less data on wire than mentioned in header.
2581 * Discard the packet.
2582 */
2583 return -1;
2584 }
2585 } else {
2586 /* short packet. Accept only if 802 values are also short */
2587 if (protolen > ETH_ZLEN) {
2588 return -1;
2589 }
2590 return datalen;
2591 }
2592 }
2593
2594 static int nv_rx_process(struct net_device *dev, int limit)
2595 {
2596 struct fe_priv *np = netdev_priv(dev);
2597 u32 flags;
2598 int rx_work = 0;
2599 struct sk_buff *skb;
2600 int len;
2601
2602 while ((np->get_rx.orig != np->put_rx.orig) &&
2603 !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
2604 (rx_work < limit)) {
2605
2606 /*
2607 * the packet is for us - immediately tear down the pci mapping.
2608 * TODO: check if a prefetch of the first cacheline improves
2609 * the performance.
2610 */
2611 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
2612 np->get_rx_ctx->dma_len,
2613 PCI_DMA_FROMDEVICE);
2614 skb = np->get_rx_ctx->skb;
2615 np->get_rx_ctx->skb = NULL;
2616
2617 /* look at what we actually got: */
2618 if (np->desc_ver == DESC_VER_1) {
2619 if (likely(flags & NV_RX_DESCRIPTORVALID)) {
2620 len = flags & LEN_MASK_V1;
2621 if (unlikely(flags & NV_RX_ERROR)) {
2622 if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) {
2623 len = nv_getlen(dev, skb->data, len);
2624 if (len < 0) {
2625 dev_kfree_skb(skb);
2626 goto next_pkt;
2627 }
2628 }
2629 /* framing errors are soft errors */
2630 else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
2631 if (flags & NV_RX_SUBSTRACT1)
2632 len--;
2633 }
2634 /* the rest are hard errors */
2635 else {
2636 if (flags & NV_RX_MISSEDFRAME)
2637 dev->stats.rx_missed_errors++;
2638 dev_kfree_skb(skb);
2639 goto next_pkt;
2640 }
2641 }
2642 } else {
2643 dev_kfree_skb(skb);
2644 goto next_pkt;
2645 }
2646 } else {
2647 if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2648 len = flags & LEN_MASK_V2;
2649 if (unlikely(flags & NV_RX2_ERROR)) {
2650 if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
2651 len = nv_getlen(dev, skb->data, len);
2652 if (len < 0) {
2653 dev_kfree_skb(skb);
2654 goto next_pkt;
2655 }
2656 }
2657 /* framing errors are soft errors */
2658 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2659 if (flags & NV_RX2_SUBSTRACT1)
2660 len--;
2661 }
2662 /* the rest are hard errors */
2663 else {
2664 dev_kfree_skb(skb);
2665 goto next_pkt;
2666 }
2667 }
2668 if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
2669 ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */
2670 skb->ip_summed = CHECKSUM_UNNECESSARY;
2671 } else {
2672 dev_kfree_skb(skb);
2673 goto next_pkt;
2674 }
2675 }
2676 /* got a valid packet - forward it to the network core */
2677 skb_put(skb, len);
2678 skb->protocol = eth_type_trans(skb, dev);
2679 napi_gro_receive(&np->napi, skb);
2680 dev->stats.rx_packets++;
2681 next_pkt:
2682 if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
2683 np->get_rx.orig = np->first_rx.orig;
2684 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2685 np->get_rx_ctx = np->first_rx_ctx;
2686
2687 rx_work++;
2688 }
2689
2690 return rx_work;
2691 }
2692
2693 static int nv_rx_process_optimized(struct net_device *dev, int limit)
2694 {
2695 struct fe_priv *np = netdev_priv(dev);
2696 u32 flags;
2697 u32 vlanflags = 0;
2698 int rx_work = 0;
2699 struct sk_buff *skb;
2700 int len;
2701
2702 while ((np->get_rx.ex != np->put_rx.ex) &&
2703 !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
2704 (rx_work < limit)) {
2705
2706 /*
2707 * the packet is for us - immediately tear down the pci mapping.
2708 * TODO: check if a prefetch of the first cacheline improves
2709 * the performance.
2710 */
2711 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
2712 np->get_rx_ctx->dma_len,
2713 PCI_DMA_FROMDEVICE);
2714 skb = np->get_rx_ctx->skb;
2715 np->get_rx_ctx->skb = NULL;
2716
2717 /* look at what we actually got: */
2718 if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2719 len = flags & LEN_MASK_V2;
2720 if (unlikely(flags & NV_RX2_ERROR)) {
2721 if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
2722 len = nv_getlen(dev, skb->data, len);
2723 if (len < 0) {
2724 dev_kfree_skb(skb);
2725 goto next_pkt;
2726 }
2727 }
2728 /* framing errors are soft errors */
2729 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2730 if (flags & NV_RX2_SUBSTRACT1)
2731 len--;
2732 }
2733 /* the rest are hard errors */
2734 else {
2735 dev_kfree_skb(skb);
2736 goto next_pkt;
2737 }
2738 }
2739
2740 if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
2741 ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */
2742 skb->ip_summed = CHECKSUM_UNNECESSARY;
2743
2744 /* got a valid packet - forward it to the network core */
2745 skb_put(skb, len);
2746 skb->protocol = eth_type_trans(skb, dev);
2747 prefetch(skb->data);
2748
2749 vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
2750
2751 /*
2752 * We do need to check for NETIF_F_HW_VLAN_RX here:
2753 * even if vlan rx accel is disabled,
2754 * NV_RX3_VLAN_TAG_PRESENT is pseudo-randomly set.
2755 */
2756 if (dev->features & NETIF_F_HW_VLAN_RX &&
2757 vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
2758 u16 vid = vlanflags & NV_RX3_VLAN_TAG_MASK;
2759
2760 __vlan_hwaccel_put_tag(skb, vid);
2761 }
2762 napi_gro_receive(&np->napi, skb);
2763 dev->stats.rx_packets++;
2764 } else {
2765 dev_kfree_skb(skb);
2766 }
2767 next_pkt:
2768 if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
2769 np->get_rx.ex = np->first_rx.ex;
2770 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2771 np->get_rx_ctx = np->first_rx_ctx;
2772
2773 rx_work++;
2774 }
2775
2776 return rx_work;
2777 }
2778
2779 static void set_bufsize(struct net_device *dev)
2780 {
2781 struct fe_priv *np = netdev_priv(dev);
2782
2783 if (dev->mtu <= ETH_DATA_LEN)
2784 np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
2785 else
2786 np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
2787 }
2788
2789 /*
2790 * nv_change_mtu: dev->change_mtu function
2791 * Called with dev_base_lock held for read.
2792 */
2793 static int nv_change_mtu(struct net_device *dev, int new_mtu)
2794 {
2795 struct fe_priv *np = netdev_priv(dev);
2796 int old_mtu;
2797
2798 if (new_mtu < 64 || new_mtu > np->pkt_limit)
2799 return -EINVAL;
2800
2801 old_mtu = dev->mtu;
2802 dev->mtu = new_mtu;
2803
2804 /* return early if the buffer sizes will not change */
2805 if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
2806 return 0;
2807 if (old_mtu == new_mtu)
2808 return 0;
2809
2810 /* synchronized against open : rtnl_lock() held by caller */
2811 if (netif_running(dev)) {
2812 u8 __iomem *base = get_hwbase(dev);
2813 /*
2814 * It seems that the nic preloads valid ring entries into an
2815 * internal buffer. The procedure for flushing everything is
2816 * guessed; there is probably a simpler approach.
2817 * Changing the MTU is a rare event, it shouldn't matter.
2818 */
2819 nv_disable_irq(dev);
2820 nv_napi_disable(dev);
2821 netif_tx_lock_bh(dev);
2822 netif_addr_lock(dev);
2823 spin_lock(&np->lock);
2824 /* stop engines */
2825 nv_stop_rxtx(dev);
2826 nv_txrx_reset(dev);
2827 /* drain rx queue */
2828 nv_drain_rxtx(dev);
2829 /* reinit driver view of the rx queue */
2830 set_bufsize(dev);
2831 if (nv_init_ring(dev)) {
2832 if (!np->in_shutdown)
2833 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
2834 }
2835 /* reinit nic view of the rx queue */
2836 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
2837 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
2838 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
2839 base + NvRegRingSizes);
2840 pci_push(base);
2841 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2842 pci_push(base);
2843
2844 /* restart rx engine */
2845 nv_start_rxtx(dev);
2846 spin_unlock(&np->lock);
2847 netif_addr_unlock(dev);
2848 netif_tx_unlock_bh(dev);
2849 nv_napi_enable(dev);
2850 nv_enable_irq(dev);
2851 }
2852 return 0;
2853 }
2854
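/* Pack the 6-byte MAC address into the two address registers, low-order
 * bytes first: dev_addr[0] ends up in bits 7:0 of NvRegMacAddrA. */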
2855 static void nv_copy_mac_to_hw(struct net_device *dev)
2856 {
2857 u8 __iomem *base = get_hwbase(dev);
2858 u32 mac[2];
2859
2860 mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
2861 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
2862 mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
2863
2864 writel(mac[0], base + NvRegMacAddrA);
2865 writel(mac[1], base + NvRegMacAddrB);
2866 }
2867
2868 /*
2869 * nv_set_mac_address: dev->set_mac_address function
2870 * Called with rtnl_lock() held.
2871 */
2872 static int nv_set_mac_address(struct net_device *dev, void *addr)
2873 {
2874 struct fe_priv *np = netdev_priv(dev);
2875 struct sockaddr *macaddr = (struct sockaddr *)addr;
2876
2877 if (!is_valid_ether_addr(macaddr->sa_data))
2878 return -EADDRNOTAVAIL;
2879
2880 /* synchronized against open : rtnl_lock() held by caller */
2881 memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
2882
2883 if (netif_running(dev)) {
2884 netif_tx_lock_bh(dev);
2885 netif_addr_lock(dev);
2886 spin_lock_irq(&np->lock);
2887
2888 /* stop rx engine */
2889 nv_stop_rx(dev);
2890
2891 /* set mac address */
2892 nv_copy_mac_to_hw(dev);
2893
2894 /* restart rx engine */
2895 nv_start_rx(dev);
2896 spin_unlock_irq(&np->lock);
2897 netif_addr_unlock(dev);
2898 netif_tx_unlock_bh(dev);
2899 } else {
2900 nv_copy_mac_to_hw(dev);
2901 }
2902 return 0;
2903 }
2904
2905 /*
2906 * nv_set_multicast: dev->set_multicast function
2907 * Called with netif_tx_lock held.
2908 */
2909 static void nv_set_multicast(struct net_device *dev)
2910 {
2911 struct fe_priv *np = netdev_priv(dev);
2912 u8 __iomem *base = get_hwbase(dev);
2913 u32 addr[2];
2914 u32 mask[2];
2915 u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;
2916
2917 memset(addr, 0, sizeof(addr));
2918 memset(mask, 0, sizeof(mask));
2919
2920 if (dev->flags & IFF_PROMISC) {
2921 pff |= NVREG_PFF_PROMISC;
2922 } else {
2923 pff |= NVREG_PFF_MYADDR;
2924
2925 if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
2926 u32 alwaysOff[2];
2927 u32 alwaysOn[2];
2928
2929 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
2930 if (dev->flags & IFF_ALLMULTI) {
2931 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
2932 } else {
2933 struct netdev_hw_addr *ha;
2934
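			/* Accumulate, across all multicast addresses,
			 * the bits set in every address (alwaysOn) and
			 * the bits clear in every address (alwaysOff);
			 * only bits that are constant across the whole
			 * list can be matched by the hw address/mask
			 * pair computed below. */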
2935 netdev_for_each_mc_addr(ha, dev) {
2936 unsigned char *hw_addr = ha->addr;
2937 u32 a, b;
2938
2939 a = le32_to_cpu(*(__le32 *) hw_addr);
2940 b = le16_to_cpu(*(__le16 *) (&hw_addr[4]));
2941 alwaysOn[0] &= a;
2942 alwaysOff[0] &= ~a;
2943 alwaysOn[1] &= b;
2944 alwaysOff[1] &= ~b;
2945 }
2946 }
2947 addr[0] = alwaysOn[0];
2948 addr[1] = alwaysOn[1];
2949 mask[0] = alwaysOn[0] | alwaysOff[0];
2950 mask[1] = alwaysOn[1] | alwaysOff[1];
2951 } else {
2952 mask[0] = NVREG_MCASTMASKA_NONE;
2953 mask[1] = NVREG_MCASTMASKB_NONE;
2954 }
2955 }
2956 addr[0] |= NVREG_MCASTADDRA_FORCE;
2957 pff |= NVREG_PFF_ALWAYS;
2958 spin_lock_irq(&np->lock);
2959 nv_stop_rx(dev);
2960 writel(addr[0], base + NvRegMulticastAddrA);
2961 writel(addr[1], base + NvRegMulticastAddrB);
2962 writel(mask[0], base + NvRegMulticastMaskA);
2963 writel(mask[1], base + NvRegMulticastMaskB);
2964 writel(pff, base + NvRegPacketFilterFlags);
2965 nv_start_rx(dev);
2966 spin_unlock_irq(&np->lock);
2967 }
2968
2969 static void nv_update_pause(struct net_device *dev, u32 pause_flags)
2970 {
2971 struct fe_priv *np = netdev_priv(dev);
2972 u8 __iomem *base = get_hwbase(dev);
2973
2974 np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);
2975
2976 if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
2977 u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
2978 if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
2979 writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
2980 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
2981 } else {
2982 writel(pff, base + NvRegPacketFilterFlags);
2983 }
2984 }
2985 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
2986 u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
2987 if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
2988 u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1;
2989 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2)
2990 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2;
2991 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) {
2992 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3;
2993 /* limit the number of tx pause frames to a default of 8 */
2994 writel(readl(base + NvRegTxPauseFrameLimit)|NVREG_TX_PAUSEFRAMELIMIT_ENABLE, base + NvRegTxPauseFrameLimit);
2995 }
2996 writel(pause_enable, base + NvRegTxPauseFrame);
2997 writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
2998 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
2999 } else {
3000 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
3001 writel(regmisc, base + NvRegMisc1);
3002 }
3003 }
3004 }
3005
3006 static void nv_force_linkspeed(struct net_device *dev, int speed, int duplex)
3007 {
3008 struct fe_priv *np = netdev_priv(dev);
3009 u8 __iomem *base = get_hwbase(dev);
3010 u32 phyreg, txreg;
3011 int mii_status;
3012
3013 np->linkspeed = NVREG_LINKSPEED_FORCE|speed;
3014 np->duplex = duplex;
3015
3016 /* see if gigabit phy */
3017 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
3018 if (mii_status & PHY_GIGABIT) {
3019 np->gigabit = PHY_GIGABIT;
3020 phyreg = readl(base + NvRegSlotTime);
3021 phyreg &= ~(0x3FF00);
3022 if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10)
3023 phyreg |= NVREG_SLOTTIME_10_100_FULL;
3024 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)
3025 phyreg |= NVREG_SLOTTIME_10_100_FULL;
3026 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
3027 phyreg |= NVREG_SLOTTIME_1000_FULL;
3028 writel(phyreg, base + NvRegSlotTime);
3029 }
3030
3031 phyreg = readl(base + NvRegPhyInterface);
3032 phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
3033 if (np->duplex == 0)
3034 phyreg |= PHY_HALF;
3035 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
3036 phyreg |= PHY_100;
3037 else if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
3038 NVREG_LINKSPEED_1000)
3039 phyreg |= PHY_1000;
3040 writel(phyreg, base + NvRegPhyInterface);
3041
3042 if (phyreg & PHY_RGMII) {
3043 if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
3044 NVREG_LINKSPEED_1000)
3045 txreg = NVREG_TX_DEFERRAL_RGMII_1000;
3046 else
3047 txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
3048 } else {
3049 txreg = NVREG_TX_DEFERRAL_DEFAULT;
3050 }
3051 writel(txreg, base + NvRegTxDeferral);
3052
3053 if (np->desc_ver == DESC_VER_1) {
3054 txreg = NVREG_TX_WM_DESC1_DEFAULT;
3055 } else {
3056 if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
3057 NVREG_LINKSPEED_1000)
3058 txreg = NVREG_TX_WM_DESC2_3_1000;
3059 else
3060 txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
3061 }
3062 writel(txreg, base + NvRegTxWatermark);
3063
3064 writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
3065 base + NvRegMisc1);
3066 pci_push(base);
3067 writel(np->linkspeed, base + NvRegLinkSpeed);
3068 pci_push(base);
3069
3070 return;
3071 }
3072
3073 /**
3074 * nv_update_linkspeed: Setup the MAC according to the link partner
3075 * @dev: Network device to be configured
3076 *
3077 * The function queries the PHY and checks if there is a link partner.
3078 * If so, it sets up the MAC accordingly. Otherwise, the MAC is
3079 * set to 10 MBit HD.
3080 *
3081 * The function returns 0 if there is no link partner and 1 if there is
3082 * a good link partner.
3083 */
3084 static int nv_update_linkspeed(struct net_device *dev)
3085 {
3086 struct fe_priv *np = netdev_priv(dev);
3087 u8 __iomem *base = get_hwbase(dev);
3088 int adv = 0;
3089 int lpa = 0;
3090 int adv_lpa, adv_pause, lpa_pause;
3091 int newls = np->linkspeed;
3092 int newdup = np->duplex;
3093 int mii_status;
3094 u32 bmcr;
3095 int retval = 0;
3096 u32 control_1000, status_1000, phyreg, pause_flags, txreg;
3097 u32 txrxFlags = 0;
3098 u32 phy_exp;
3099
3100 /* If device loopback is enabled, set carrier on and enable max link
3101 * speed.
3102 */
3103 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
3104 if (bmcr & BMCR_LOOPBACK) {
3105 if (netif_running(dev)) {
3106 nv_force_linkspeed(dev, NVREG_LINKSPEED_1000, 1);
3107 if (!netif_carrier_ok(dev))
3108 netif_carrier_on(dev);
3109 }
3110 return 1;
3111 }
3112
3113 /* BMSR_LSTATUS is latched, read it twice:
3114 * we want the current value.
3115 */
3116 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
3117 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
3118
3119 if (!(mii_status & BMSR_LSTATUS)) {
3120 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3121 newdup = 0;
3122 retval = 0;
3123 goto set_speed;
3124 }
3125
3126 if (np->autoneg == 0) {
3127 if (np->fixed_mode & LPA_100FULL) {
3128 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3129 newdup = 1;
3130 } else if (np->fixed_mode & LPA_100HALF) {
3131 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3132 newdup = 0;
3133 } else if (np->fixed_mode & LPA_10FULL) {
3134 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3135 newdup = 1;
3136 } else {
3137 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3138 newdup = 0;
3139 }
3140 retval = 1;
3141 goto set_speed;
3142 }
3143 /* check that auto-negotiation is complete */
3144 if (!(mii_status & BMSR_ANEGCOMPLETE)) {
3145 /* still in autonegotiation - configure nic for 10 MBit HD and wait. */
3146 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3147 newdup = 0;
3148 retval = 0;
3149 goto set_speed;
3150 }
3151
3152 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
3153 lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
3154
3155 retval = 1;
3156 if (np->gigabit == PHY_GIGABIT) {
3157 control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
3158 status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);
3159
3160 if ((control_1000 & ADVERTISE_1000FULL) &&
3161 (status_1000 & LPA_1000FULL)) {
3162 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
3163 newdup = 1;
3164 goto set_speed;
3165 }
3166 }
3167
3168 /* FIXME: handle parallel detection properly */
3169 adv_lpa = lpa & adv;
3170 if (adv_lpa & LPA_100FULL) {
3171 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3172 newdup = 1;
3173 } else if (adv_lpa & LPA_100HALF) {
3174 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3175 newdup = 0;
3176 } else if (adv_lpa & LPA_10FULL) {
3177 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3178 newdup = 1;
3179 } else if (adv_lpa & LPA_10HALF) {
3180 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3181 newdup = 0;
3182 } else {
3183 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3184 newdup = 0;
3185 }
3186
3187 set_speed:
3188 if (np->duplex == newdup && np->linkspeed == newls)
3189 return retval;
3190
3191 np->duplex = newdup;
3192 np->linkspeed = newls;
3193
3194 /* The transmitter and receiver must be restarted for safe update */
3195 if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) {
3196 txrxFlags |= NV_RESTART_TX;
3197 nv_stop_tx(dev);
3198 }
3199 if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
3200 txrxFlags |= NV_RESTART_RX;
3201 nv_stop_rx(dev);
3202 }
3203
3204 if (np->gigabit == PHY_GIGABIT) {
3205 phyreg = readl(base + NvRegSlotTime);
3206 phyreg &= ~(0x3FF00);
3207 if (((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) ||
3208 ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100))
3209 phyreg |= NVREG_SLOTTIME_10_100_FULL;
3210 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
3211 phyreg |= NVREG_SLOTTIME_1000_FULL;
3212 writel(phyreg, base + NvRegSlotTime);
3213 }
3214
3215 phyreg = readl(base + NvRegPhyInterface);
3216 phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
3217 if (np->duplex == 0)
3218 phyreg |= PHY_HALF;
3219 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
3220 phyreg |= PHY_100;
3221 else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
3222 phyreg |= PHY_1000;
3223 writel(phyreg, base + NvRegPhyInterface);
3224
3225 phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */
3226 if (phyreg & PHY_RGMII) {
3227 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) {
3228 txreg = NVREG_TX_DEFERRAL_RGMII_1000;
3229 } else {
3230 if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) {
3231 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10)
3232 txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_10;
3233 else
3234 txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_100;
3235 } else {
3236 txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
3237 }
3238 }
3239 } else {
3240 if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX))
3241 txreg = NVREG_TX_DEFERRAL_MII_STRETCH;
3242 else
3243 txreg = NVREG_TX_DEFERRAL_DEFAULT;
3244 }
3245 writel(txreg, base + NvRegTxDeferral);
3246
3247 if (np->desc_ver == DESC_VER_1) {
3248 txreg = NVREG_TX_WM_DESC1_DEFAULT;
3249 } else {
3250 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
3251 txreg = NVREG_TX_WM_DESC2_3_1000;
3252 else
3253 txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
3254 }
3255 writel(txreg, base + NvRegTxWatermark);
3256
3257 writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
3258 base + NvRegMisc1);
3259 pci_push(base);
3260 writel(np->linkspeed, base + NvRegLinkSpeed);
3261 pci_push(base);
3262
3263 pause_flags = 0;
3264 /* setup pause frame */
3265 if (np->duplex != 0) {
3266 if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
3267 adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3268 lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
3269
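			/* Resolve flow control roughly per IEEE 802.3
			 * Annex 28B: symmetric pause when both sides
			 * advertise PAUSE_CAP, tx- or rx-only pause
			 * when the asymmetric bits pair up. */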
3270 switch (adv_pause) {
3271 case ADVERTISE_PAUSE_CAP:
3272 if (lpa_pause & LPA_PAUSE_CAP) {
3273 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3274 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3275 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3276 }
3277 break;
3278 case ADVERTISE_PAUSE_ASYM:
3279 if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM))
3280 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3281 break;
3282 case ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM:
3283 if (lpa_pause & LPA_PAUSE_CAP) {
3284 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3285 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3286 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3287 }
3288 if (lpa_pause == LPA_PAUSE_ASYM)
3289 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3290 break;
3291 }
3292 } else {
3293 pause_flags = np->pause_flags;
3294 }
3295 }
3296 nv_update_pause(dev, pause_flags);
3297
3298 if (txrxFlags & NV_RESTART_TX)
3299 nv_start_tx(dev);
3300 if (txrxFlags & NV_RESTART_RX)
3301 nv_start_rx(dev);
3302
3303 return retval;
3304 }
3305
3306 static void nv_linkchange(struct net_device *dev)
3307 {
3308 if (nv_update_linkspeed(dev)) {
3309 if (!netif_carrier_ok(dev)) {
3310 netif_carrier_on(dev);
3311 netdev_info(dev, "link up\n");
3312 nv_txrx_gate(dev, false);
3313 nv_start_rx(dev);
3314 }
3315 } else {
3316 if (netif_carrier_ok(dev)) {
3317 netif_carrier_off(dev);
3318 netdev_info(dev, "link down\n");
3319 nv_txrx_gate(dev, true);
3320 nv_stop_rx(dev);
3321 }
3322 }
3323 }
3324
3325 static void nv_link_irq(struct net_device *dev)
3326 {
3327 u8 __iomem *base = get_hwbase(dev);
3328 u32 miistat;
3329
3330 miistat = readl(base + NvRegMIIStatus);
3331 writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus);
3332
3333 if (miistat & (NVREG_MIISTAT_LINKCHANGE))
3334 nv_linkchange(dev);
3335 }
3336
3337 static void nv_msi_workaround(struct fe_priv *np)
3338 {
3339
3340 /* Need to toggle the msi irq mask within the ethernet device;
3341 * otherwise, future interrupts will not be detected.
3342 */
3343 if (np->msi_flags & NV_MSI_ENABLED) {
3344 u8 __iomem *base = np->base;
3345
3346 writel(0, base + NvRegMSIIrqMask);
3347 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
3348 }
3349 }
3350
3351 static inline int nv_change_interrupt_mode(struct net_device *dev, int total_work)
3352 {
3353 struct fe_priv *np = netdev_priv(dev);
3354
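	/* Dynamic mode: a burst of work above NV_DYNAMIC_THRESHOLD moves
	 * the nic to timer-driven (CPU) interrupts, while
	 * NV_DYNAMIC_MAX_QUIET_COUNT consecutive quiet polls move it back
	 * to per-packet (throughput) interrupts.  Returns 1 when the
	 * caller must rewrite the irq mask. */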
3355 if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC) {
3356 if (total_work > NV_DYNAMIC_THRESHOLD) {
3357 /* transition to poll based interrupts */
3358 np->quiet_count = 0;
3359 if (np->irqmask != NVREG_IRQMASK_CPU) {
3360 np->irqmask = NVREG_IRQMASK_CPU;
3361 return 1;
3362 }
3363 } else {
3364 if (np->quiet_count < NV_DYNAMIC_MAX_QUIET_COUNT) {
3365 np->quiet_count++;
3366 } else {
3367 /* reached a period of low activity, switch
3368 to per tx/rx packet interrupts */
3369 if (np->irqmask != NVREG_IRQMASK_THROUGHPUT) {
3370 np->irqmask = NVREG_IRQMASK_THROUGHPUT;
3371 return 1;
3372 }
3373 }
3374 }
3375 }
3376 return 0;
3377 }
3378
3379 static irqreturn_t nv_nic_irq(int foo, void *data)
3380 {
3381 struct net_device *dev = (struct net_device *) data;
3382 struct fe_priv *np = netdev_priv(dev);
3383 u8 __iomem *base = get_hwbase(dev);
3384
3385 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3386 np->events = readl(base + NvRegIrqStatus);
3387 writel(np->events, base + NvRegIrqStatus);
3388 } else {
3389 np->events = readl(base + NvRegMSIXIrqStatus);
3390 writel(np->events, base + NvRegMSIXIrqStatus);
3391 }
3392 if (!(np->events & np->irqmask))
3393 return IRQ_NONE;
3394
3395 nv_msi_workaround(np);
3396
3397 if (napi_schedule_prep(&np->napi)) {
3398 /*
3399 * Disable further irq's (msix not enabled with napi)
3400 */
3401 writel(0, base + NvRegIrqMask);
3402 __napi_schedule(&np->napi);
3403 }
3404
3405 return IRQ_HANDLED;
3406 }
3407
3408 /**
3409 * All _optimized functions are used to help increase performance
3410 * (reduce CPU and increase throughput). They use descriptor version 3
3411 * and compiler directives, and reduce memory accesses.
3412 */
3413 static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
3414 {
3415 struct net_device *dev = (struct net_device *) data;
3416 struct fe_priv *np = netdev_priv(dev);
3417 u8 __iomem *base = get_hwbase(dev);
3418
3419 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3420 np->events = readl(base + NvRegIrqStatus);
3421 writel(np->events, base + NvRegIrqStatus);
3422 } else {
3423 np->events = readl(base + NvRegMSIXIrqStatus);
3424 writel(np->events, base + NvRegMSIXIrqStatus);
3425 }
3426 if (!(np->events & np->irqmask))
3427 return IRQ_NONE;
3428
3429 nv_msi_workaround(np);
3430
3431 if (napi_schedule_prep(&np->napi)) {
3432 /*
3433 * Disable further irq's (msix not enabled with napi)
3434 */
3435 writel(0, base + NvRegIrqMask);
3436 __napi_schedule(&np->napi);
3437 }
3438
3439 return IRQ_HANDLED;
3440 }
3441
3442 static irqreturn_t nv_nic_irq_tx(int foo, void *data)
3443 {
3444 struct net_device *dev = (struct net_device *) data;
3445 struct fe_priv *np = netdev_priv(dev);
3446 u8 __iomem *base = get_hwbase(dev);
3447 u32 events;
3448 int i;
3449 unsigned long flags;
3450
3451 for (i = 0;; i++) {
3452 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
3453 writel(events, base + NvRegMSIXIrqStatus);
3454 netdev_dbg(dev, "tx irq events: %08x\n", events);
3455 if (!(events & np->irqmask))
3456 break;
3457
3458 spin_lock_irqsave(&np->lock, flags);
3459 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3460 spin_unlock_irqrestore(&np->lock, flags);
3461
3462 if (unlikely(i > max_interrupt_work)) {
3463 spin_lock_irqsave(&np->lock, flags);
3464 /* disable interrupts on the nic */
3465 writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
3466 pci_push(base);
3467
3468 if (!np->in_shutdown) {
3469 np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
3470 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3471 }
3472 spin_unlock_irqrestore(&np->lock, flags);
3473 netdev_dbg(dev, "%s: too many iterations (%d)\n",
3474 __func__, i);
3475 break;
3476 }
3477
3478 }
3479
3480 return IRQ_RETVAL(i);
3481 }
3482
3483 static int nv_napi_poll(struct napi_struct *napi, int budget)
3484 {
3485 struct fe_priv *np = container_of(napi, struct fe_priv, napi);
3486 struct net_device *dev = np->dev;
3487 u8 __iomem *base = get_hwbase(dev);
3488 unsigned long flags;
3489 int retcode;
3490 int rx_count, tx_work = 0, rx_work = 0;
3491
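	/* Alternate between reaping tx completions and rx packets until
	 * the rx budget is spent, the ring runs dry, or an skb allocation
	 * fails (retcode != 0, handled below via the oom_kick timer). */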
3492 do {
3493 if (!nv_optimized(np)) {
3494 spin_lock_irqsave(&np->lock, flags);
3495 tx_work += nv_tx_done(dev, np->tx_ring_size);
3496 spin_unlock_irqrestore(&np->lock, flags);
3497
3498 rx_count = nv_rx_process(dev, budget - rx_work);
3499 retcode = nv_alloc_rx(dev);
3500 } else {
3501 spin_lock_irqsave(&np->lock, flags);
3502 tx_work += nv_tx_done_optimized(dev, np->tx_ring_size);
3503 spin_unlock_irqrestore(&np->lock, flags);
3504
3505 rx_count = nv_rx_process_optimized(dev,
3506 budget - rx_work);
3507 retcode = nv_alloc_rx_optimized(dev);
3508 }
3509 } while (retcode == 0 &&
3510 rx_count > 0 && (rx_work += rx_count) < budget);
3511
3512 if (retcode) {
3513 spin_lock_irqsave(&np->lock, flags);
3514 if (!np->in_shutdown)
3515 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3516 spin_unlock_irqrestore(&np->lock, flags);
3517 }
3518
3519 nv_change_interrupt_mode(dev, tx_work + rx_work);
3520
3521 if (unlikely(np->events & NVREG_IRQ_LINK)) {
3522 spin_lock_irqsave(&np->lock, flags);
3523 nv_link_irq(dev);
3524 spin_unlock_irqrestore(&np->lock, flags);
3525 }
3526 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
3527 spin_lock_irqsave(&np->lock, flags);
3528 nv_linkchange(dev);
3529 spin_unlock_irqrestore(&np->lock, flags);
3530 np->link_timeout = jiffies + LINK_TIMEOUT;
3531 }
3532 if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
3533 spin_lock_irqsave(&np->lock, flags);
3534 if (!np->in_shutdown) {
3535 np->nic_poll_irq = np->irqmask;
3536 np->recover_error = 1;
3537 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3538 }
3539 spin_unlock_irqrestore(&np->lock, flags);
3540 napi_complete(napi);
3541 return rx_work;
3542 }
3543
3544 if (rx_work < budget) {
3545 /* re-enable interrupts
3546 * (MSI-X not enabled with NAPI) */
3547 napi_complete(napi);
3548
3549 writel(np->irqmask, base + NvRegIrqMask);
3550 }
3551 return rx_work;
3552 }
3553
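/*
 * MSI-X RX vector handler: process received packets in batches of
 * RX_WORK_PER_LOOP and refill the RX ring.  Like the TX handler it
 * defers to the nic_poll timer once max_interrupt_work iterations
 * are exceeded.
 */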
3554 static irqreturn_t nv_nic_irq_rx(int foo, void *data)
3555 {
3556 struct net_device *dev = (struct net_device *) data;
3557 struct fe_priv *np = netdev_priv(dev);
3558 u8 __iomem *base = get_hwbase(dev);
3559 u32 events;
3560 int i;
3561 unsigned long flags;
3562
3563 for (i = 0;; i++) {
3564 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
3565 writel(events, base + NvRegMSIXIrqStatus);
3566 netdev_dbg(dev, "rx irq events: %08x\n", events);
3567 if (!(events & np->irqmask))
3568 break;
3569
3570 if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
3571 if (unlikely(nv_alloc_rx_optimized(dev))) {
3572 spin_lock_irqsave(&np->lock, flags);
3573 if (!np->in_shutdown)
3574 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3575 spin_unlock_irqrestore(&np->lock, flags);
3576 }
3577 }
3578
3579 if (unlikely(i > max_interrupt_work)) {
3580 spin_lock_irqsave(&np->lock, flags);
3581 /* disable interrupts on the nic */
3582 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3583 pci_push(base);
3584
3585 if (!np->in_shutdown) {
3586 np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
3587 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3588 }
3589 spin_unlock_irqrestore(&np->lock, flags);
3590 netdev_dbg(dev, "%s: too many iterations (%d)\n",
3591 __func__, i);
3592 break;
3593 }
3594 }
3595
3596 return IRQ_RETVAL(i);
3597 }
3598
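/*
 * MSI-X "other" vector handler: covers link changes, the optional
 * link timer and recoverable-error events, and also reaps TX work in
 * case the TX handler hit its loop limit.
 */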
3599 static irqreturn_t nv_nic_irq_other(int foo, void *data)
3600 {
3601 struct net_device *dev = (struct net_device *) data;
3602 struct fe_priv *np = netdev_priv(dev);
3603 u8 __iomem *base = get_hwbase(dev);
3604 u32 events;
3605 int i;
3606 unsigned long flags;
3607
3608 for (i = 0;; i++) {
3609 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
3610 writel(events, base + NvRegMSIXIrqStatus);
3611 netdev_dbg(dev, "irq events: %08x\n", events);
3612 if (!(events & np->irqmask))
3613 break;
3614
3615 /* check tx in case we reached max loop limit in tx isr */
3616 spin_lock_irqsave(&np->lock, flags);
3617 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3618 spin_unlock_irqrestore(&np->lock, flags);
3619
3620 if (events & NVREG_IRQ_LINK) {
3621 spin_lock_irqsave(&np->lock, flags);
3622 nv_link_irq(dev);
3623 spin_unlock_irqrestore(&np->lock, flags);
3624 }
3625 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
3626 spin_lock_irqsave(&np->lock, flags);
3627 nv_linkchange(dev);
3628 spin_unlock_irqrestore(&np->lock, flags);
3629 np->link_timeout = jiffies + LINK_TIMEOUT;
3630 }
3631 if (events & NVREG_IRQ_RECOVER_ERROR) {
3632 spin_lock_irq(&np->lock);
3633 /* disable interrupts on the nic */
3634 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3635 pci_push(base);
3636
3637 if (!np->in_shutdown) {
3638 np->nic_poll_irq |= NVREG_IRQ_OTHER;
3639 np->recover_error = 1;
3640 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3641 }
3642 spin_unlock_irq(&np->lock);
3643 break;
3644 }
3645 if (unlikely(i > max_interrupt_work)) {
3646 spin_lock_irqsave(&np->lock, flags);
3647 /* disable interrupts on the nic */
3648 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3649 pci_push(base);
3650
3651 if (!np->in_shutdown) {
3652 np->nic_poll_irq |= NVREG_IRQ_OTHER;
3653 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3654 }
3655 spin_unlock_irqrestore(&np->lock, flags);
3656 netdev_dbg(dev, "%s: too many iterations (%d)\n",
3657 __func__, i);
3658 break;
3659 }
3660
3661 }
3662
3663 return IRQ_RETVAL(i);
3664 }
3665
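/*
 * Minimal handler installed during the ethtool interrupt self-test:
 * it only acknowledges the timer interrupt and records that it fired
 * in np->intr_test.
 */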
3666 static irqreturn_t nv_nic_irq_test(int foo, void *data)
3667 {
3668 struct net_device *dev = (struct net_device *) data;
3669 struct fe_priv *np = netdev_priv(dev);
3670 u8 __iomem *base = get_hwbase(dev);
3671 u32 events;
3672
3673 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3674 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
3675 writel(events & NVREG_IRQ_TIMER, base + NvRegIrqStatus);
3676 } else {
3677 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
3678 writel(events & NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
3679 }
3680 pci_push(base);
3681 if (!(events & NVREG_IRQ_TIMER))
3682 return IRQ_RETVAL(0);
3683
3684 nv_msi_workaround(np);
3685
3686 spin_lock(&np->lock);
3687 np->intr_test = 1;
3688 spin_unlock(&np->lock);
3689
3690 return IRQ_RETVAL(1);
3691 }
3692
3693 static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
3694 {
3695 u8 __iomem *base = get_hwbase(dev);
3696 int i;
3697 u32 msixmap = 0;
3698
3699 /* Each interrupt bit can be mapped to a MSIX vector (4 bits).
3700 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
3701 * the remaining 8 interrupts.
3702 */
3703 for (i = 0; i < 8; i++) {
3704 if ((irqmask >> i) & 0x1)
3705 msixmap |= vector << (i << 2);
3706 }
3707 writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
3708
3709 msixmap = 0;
3710 for (i = 0; i < 8; i++) {
3711 if ((irqmask >> (i + 8)) & 0x1)
3712 msixmap |= vector << (i << 2);
3713 }
3714 writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
3715 }
3716
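/*
 * Request the device interrupt(s), trying MSI-X first (with separate
 * rx/tx/other vectors in throughput mode), then MSI, then the legacy
 * INTx line.  Returns 0 on success, 1 on failure.
 */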
3717 static int nv_request_irq(struct net_device *dev, int intr_test)
3718 {
3719 struct fe_priv *np = get_nvpriv(dev);
3720 u8 __iomem *base = get_hwbase(dev);
3721 int ret = 1;
3722 int i;
3723 irqreturn_t (*handler)(int foo, void *data);
3724
3725 if (intr_test) {
3726 handler = nv_nic_irq_test;
3727 } else {
3728 if (nv_optimized(np))
3729 handler = nv_nic_irq_optimized;
3730 else
3731 handler = nv_nic_irq;
3732 }
3733
3734 if (np->msi_flags & NV_MSI_X_CAPABLE) {
3735 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
3736 np->msi_x_entry[i].entry = i;
3737 ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK));
3738 if (ret == 0) {
3739 np->msi_flags |= NV_MSI_X_ENABLED;
3740 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
3741 /* Request irq for rx handling */
3742 sprintf(np->name_rx, "%s-rx", dev->name);
3743 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
3744 nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) {
3745 netdev_info(dev,
3746 "request_irq failed for rx %d\n",
3747 ret);
3748 pci_disable_msix(np->pci_dev);
3749 np->msi_flags &= ~NV_MSI_X_ENABLED;
3750 goto out_err;
3751 }
3752 /* Request irq for tx handling */
3753 sprintf(np->name_tx, "%s-tx", dev->name);
3754 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
3755 nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) {
3756 netdev_info(dev,
3757 "request_irq failed for tx %d\n",
3758 ret);
3759 pci_disable_msix(np->pci_dev);
3760 np->msi_flags &= ~NV_MSI_X_ENABLED;
3761 goto out_free_rx;
3762 }
3763 /* Request irq for link and timer handling */
3764 sprintf(np->name_other, "%s-other", dev->name);
3765 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
3766 nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) {
3767 netdev_info(dev,
3768 "request_irq failed for link %d\n",
3769 ret);
3770 pci_disable_msix(np->pci_dev);
3771 np->msi_flags &= ~NV_MSI_X_ENABLED;
3772 goto out_free_tx;
3773 }
3774 /* map interrupts to their respective vector */
3775 writel(0, base + NvRegMSIXMap0);
3776 writel(0, base + NvRegMSIXMap1);
3777 set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
3778 set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
3779 set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
3780 } else {
3781 /* Request irq for all interrupts */
3782 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) {
3783 netdev_info(dev,
3784 "request_irq failed %d\n",
3785 ret);
3786 pci_disable_msix(np->pci_dev);
3787 np->msi_flags &= ~NV_MSI_X_ENABLED;
3788 goto out_err;
3789 }
3790
3791 /* map interrupts to vector 0 */
3792 writel(0, base + NvRegMSIXMap0);
3793 writel(0, base + NvRegMSIXMap1);
3794 }
3795 }
3796 }
3797 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
3798 ret = pci_enable_msi(np->pci_dev);
3799 if (ret == 0) {
3800 np->msi_flags |= NV_MSI_ENABLED;
3801 dev->irq = np->pci_dev->irq;
3802 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
3803 netdev_info(dev, "request_irq failed %d\n",
3804 ret);
3805 pci_disable_msi(np->pci_dev);
3806 np->msi_flags &= ~NV_MSI_ENABLED;
3807 dev->irq = np->pci_dev->irq;
3808 goto out_err;
3809 }
3810
3811 /* map interrupts to vector 0 */
3812 writel(0, base + NvRegMSIMap0);
3813 writel(0, base + NvRegMSIMap1);
3814 /* enable msi vector 0 */
3815 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
3816 }
3817 }
3818 if (ret != 0) {
3819 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
3820 goto out_err;
3821
3822 }
3823
3824 return 0;
3825 out_free_tx:
3826 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
3827 out_free_rx:
3828 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
3829 out_err:
3830 return 1;
3831 }
3832
3833 static void nv_free_irq(struct net_device *dev)
3834 {
3835 struct fe_priv *np = get_nvpriv(dev);
3836 int i;
3837
3838 if (np->msi_flags & NV_MSI_X_ENABLED) {
3839 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
3840 free_irq(np->msi_x_entry[i].vector, dev);
3841 pci_disable_msix(np->pci_dev);
3842 np->msi_flags &= ~NV_MSI_X_ENABLED;
3843 } else {
3844 free_irq(np->pci_dev->irq, dev);
3845 if (np->msi_flags & NV_MSI_ENABLED) {
3846 pci_disable_msi(np->pci_dev);
3847 np->msi_flags &= ~NV_MSI_ENABLED;
3848 }
3849 }
3850 }
3851
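/*
 * Timer callback used when interrupt processing had to be deferred
 * (and for netpoll): it temporarily disables the irq line(s), handles
 * any pending recoverable-error reset, then runs the appropriate irq
 * handler(s) by hand before re-enabling the line(s).
 */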
3852 static void nv_do_nic_poll(unsigned long data)
3853 {
3854 struct net_device *dev = (struct net_device *) data;
3855 struct fe_priv *np = netdev_priv(dev);
3856 u8 __iomem *base = get_hwbase(dev);
3857 u32 mask = 0;
3858
3859 /*
3860 * First disable irq(s), then re-enable interrupts on the nic;
3861 * this must happen before calling nv_nic_irq because that
3862 * handler may decide to change the mask again.
3863 */
3864
3865 if (!using_multi_irqs(dev)) {
3866 if (np->msi_flags & NV_MSI_X_ENABLED)
3867 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
3868 else
3869 disable_irq_lockdep(np->pci_dev->irq);
3870 mask = np->irqmask;
3871 } else {
3872 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
3873 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
3874 mask |= NVREG_IRQ_RX_ALL;
3875 }
3876 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
3877 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
3878 mask |= NVREG_IRQ_TX_ALL;
3879 }
3880 if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
3881 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
3882 mask |= NVREG_IRQ_OTHER;
3883 }
3884 }
3885 /* disable_irq() contains synchronize_irq, thus no irq handler can run now */
3886
3887 if (np->recover_error) {
3888 np->recover_error = 0;
3889 netdev_info(dev, "MAC in recoverable error state\n");
3890 if (netif_running(dev)) {
3891 netif_tx_lock_bh(dev);
3892 netif_addr_lock(dev);
3893 spin_lock(&np->lock);
3894 /* stop engines */
3895 nv_stop_rxtx(dev);
3896 if (np->driver_data & DEV_HAS_POWER_CNTRL)
3897 nv_mac_reset(dev);
3898 nv_txrx_reset(dev);
3899 /* drain rx queue */
3900 nv_drain_rxtx(dev);
3901 /* reinit driver view of the rx queue */
3902 set_bufsize(dev);
3903 if (nv_init_ring(dev)) {
3904 if (!np->in_shutdown)
3905 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3906 }
3907 /* reinit nic view of the rx queue */
3908 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
3909 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
3910 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
3911 base + NvRegRingSizes);
3912 pci_push(base);
3913 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
3914 pci_push(base);
3915 /* clear interrupts */
3916 if (!(np->msi_flags & NV_MSI_X_ENABLED))
3917 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
3918 else
3919 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
3920
3921 /* restart rx engine */
3922 nv_start_rxtx(dev);
3923 spin_unlock(&np->lock);
3924 netif_addr_unlock(dev);
3925 netif_tx_unlock_bh(dev);
3926 }
3927 }
3928
3929 writel(mask, base + NvRegIrqMask);
3930 pci_push(base);
3931
3932 if (!using_multi_irqs(dev)) {
3933 np->nic_poll_irq = 0;
3934 if (nv_optimized(np))
3935 nv_nic_irq_optimized(0, dev);
3936 else
3937 nv_nic_irq(0, dev);
3938 if (np->msi_flags & NV_MSI_X_ENABLED)
3939 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
3940 else
3941 enable_irq_lockdep(np->pci_dev->irq);
3942 } else {
3943 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
3944 np->nic_poll_irq &= ~NVREG_IRQ_RX_ALL;
3945 nv_nic_irq_rx(0, dev);
3946 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
3947 }
3948 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
3949 np->nic_poll_irq &= ~NVREG_IRQ_TX_ALL;
3950 nv_nic_irq_tx(0, dev);
3951 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
3952 }
3953 if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
3954 np->nic_poll_irq &= ~NVREG_IRQ_OTHER;
3955 nv_nic_irq_other(0, dev);
3956 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
3957 }
3958 }
3959
3960 }
3961
3962 #ifdef CONFIG_NET_POLL_CONTROLLER
3963 static void nv_poll_controller(struct net_device *dev)
3964 {
3965 nv_do_nic_poll((unsigned long) dev);
3966 }
3967 #endif
3968
3969 static void nv_do_stats_poll(unsigned long data)
3970 {
3971 struct net_device *dev = (struct net_device *) data;
3972 struct fe_priv *np = netdev_priv(dev);
3973
3974 nv_get_hw_stats(dev);
3975
3976 if (!np->in_shutdown)
3977 mod_timer(&np->stats_poll,
3978 round_jiffies(jiffies + STATS_INTERVAL));
3979 }
3980
3981 static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3982 {
3983 struct fe_priv *np = netdev_priv(dev);
3984 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
3985 strlcpy(info->version, FORCEDETH_VERSION, sizeof(info->version));
3986 strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
3987 }
3988
3989 static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
3990 {
3991 struct fe_priv *np = netdev_priv(dev);
3992 wolinfo->supported = WAKE_MAGIC;
3993
3994 spin_lock_irq(&np->lock);
3995 if (np->wolenabled)
3996 wolinfo->wolopts = WAKE_MAGIC;
3997 spin_unlock_irq(&np->lock);
3998 }
3999
4000 static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
4001 {
4002 struct fe_priv *np = netdev_priv(dev);
4003 u8 __iomem *base = get_hwbase(dev);
4004 u32 flags = 0;
4005
4006 if (wolinfo->wolopts == 0) {
4007 np->wolenabled = 0;
4008 } else if (wolinfo->wolopts & WAKE_MAGIC) {
4009 np->wolenabled = 1;
4010 flags = NVREG_WAKEUPFLAGS_ENABLE;
4011 }
4012 if (netif_running(dev)) {
4013 spin_lock_irq(&np->lock);
4014 writel(flags, base + NvRegWakeUpFlags);
4015 spin_unlock_irq(&np->lock);
4016 }
4017 device_set_wakeup_enable(&np->pci_dev->dev, np->wolenabled);
4018 return 0;
4019 }
4020
4021 static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
4022 {
4023 struct fe_priv *np = netdev_priv(dev);
4024 u32 speed;
4025 int adv;
4026
4027 spin_lock_irq(&np->lock);
4028 ecmd->port = PORT_MII;
4029 if (!netif_running(dev)) {
4030 /* We do not track link speed / duplex setting if the
4031 * interface is disabled. Force a link check */
4032 if (nv_update_linkspeed(dev)) {
4033 if (!netif_carrier_ok(dev))
4034 netif_carrier_on(dev);
4035 } else {
4036 if (netif_carrier_ok(dev))
4037 netif_carrier_off(dev);
4038 }
4039 }
4040
4041 if (netif_carrier_ok(dev)) {
4042 switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) {
4043 case NVREG_LINKSPEED_10:
4044 speed = SPEED_10;
4045 break;
4046 case NVREG_LINKSPEED_100:
4047 speed = SPEED_100;
4048 break;
4049 case NVREG_LINKSPEED_1000:
4050 speed = SPEED_1000;
4051 break;
4052 default:
4053 speed = -1;
4054 break;
4055 }
4056 ecmd->duplex = DUPLEX_HALF;
4057 if (np->duplex)
4058 ecmd->duplex = DUPLEX_FULL;
4059 } else {
4060 speed = -1;
4061 ecmd->duplex = -1;
4062 }
4063 ethtool_cmd_speed_set(ecmd, speed);
4064 ecmd->autoneg = np->autoneg;
4065
4066 ecmd->advertising = ADVERTISED_MII;
4067 if (np->autoneg) {
4068 ecmd->advertising |= ADVERTISED_Autoneg;
4069 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4070 if (adv & ADVERTISE_10HALF)
4071 ecmd->advertising |= ADVERTISED_10baseT_Half;
4072 if (adv & ADVERTISE_10FULL)
4073 ecmd->advertising |= ADVERTISED_10baseT_Full;
4074 if (adv & ADVERTISE_100HALF)
4075 ecmd->advertising |= ADVERTISED_100baseT_Half;
4076 if (adv & ADVERTISE_100FULL)
4077 ecmd->advertising |= ADVERTISED_100baseT_Full;
4078 if (np->gigabit == PHY_GIGABIT) {
4079 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4080 if (adv & ADVERTISE_1000FULL)
4081 ecmd->advertising |= ADVERTISED_1000baseT_Full;
4082 }
4083 }
4084 ecmd->supported = (SUPPORTED_Autoneg |
4085 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
4086 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
4087 SUPPORTED_MII);
4088 if (np->gigabit == PHY_GIGABIT)
4089 ecmd->supported |= SUPPORTED_1000baseT_Full;
4090
4091 ecmd->phy_address = np->phyaddr;
4092 ecmd->transceiver = XCVR_EXTERNAL;
4093
4094 /* ignore maxtxpkt, maxrxpkt for now */
4095 spin_unlock_irq(&np->lock);
4096 return 0;
4097 }
4098
4099 static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
4100 {
4101 struct fe_priv *np = netdev_priv(dev);
4102 u32 speed = ethtool_cmd_speed(ecmd);
4103
4104 if (ecmd->port != PORT_MII)
4105 return -EINVAL;
4106 if (ecmd->transceiver != XCVR_EXTERNAL)
4107 return -EINVAL;
4108 if (ecmd->phy_address != np->phyaddr) {
4109 /* TODO: support switching between multiple phys. Should be
4110 * trivial, but not enabled due to lack of test hardware. */
4111 return -EINVAL;
4112 }
4113 if (ecmd->autoneg == AUTONEG_ENABLE) {
4114 u32 mask;
4115
4116 mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
4117 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
4118 if (np->gigabit == PHY_GIGABIT)
4119 mask |= ADVERTISED_1000baseT_Full;
4120
4121 if ((ecmd->advertising & mask) == 0)
4122 return -EINVAL;
4123
4124 } else if (ecmd->autoneg == AUTONEG_DISABLE) {
4125 /* Note: with autonegotiation disabled, forcing speed 1000 is
4126 * intentionally forbidden - no one should need that. */
4127
4128 if (speed != SPEED_10 && speed != SPEED_100)
4129 return -EINVAL;
4130 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
4131 return -EINVAL;
4132 } else {
4133 return -EINVAL;
4134 }
4135
4136 netif_carrier_off(dev);
4137 if (netif_running(dev)) {
4138 unsigned long flags;
4139
4140 nv_disable_irq(dev);
4141 netif_tx_lock_bh(dev);
4142 netif_addr_lock(dev);
4143 /* with plain spinlock lockdep complains */
4144 spin_lock_irqsave(&np->lock, flags);
4145 /* stop engines */
4146 /* FIXME:
4147 * this can take some time, and interrupts are disabled
4148 * due to spin_lock_irqsave, but let's hope no daemon
4149 * is going to change the settings very often...
4150 * Worst case:
4151 * NV_RXSTOP_DELAY1MAX + NV_TXSTOP_DELAY1MAX
4152 * + some minor delays, which is up to a second approximately
4153 */
4154 nv_stop_rxtx(dev);
4155 spin_unlock_irqrestore(&np->lock, flags);
4156 netif_addr_unlock(dev);
4157 netif_tx_unlock_bh(dev);
4158 }
4159
4160 if (ecmd->autoneg == AUTONEG_ENABLE) {
4161 int adv, bmcr;
4162
4163 np->autoneg = 1;
4164
4165 /* advertise only what has been requested */
4166 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4167 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4168 if (ecmd->advertising & ADVERTISED_10baseT_Half)
4169 adv |= ADVERTISE_10HALF;
4170 if (ecmd->advertising & ADVERTISED_10baseT_Full)
4171 adv |= ADVERTISE_10FULL;
4172 if (ecmd->advertising & ADVERTISED_100baseT_Half)
4173 adv |= ADVERTISE_100HALF;
4174 if (ecmd->advertising & ADVERTISED_100baseT_Full)
4175 adv |= ADVERTISE_100FULL;
4176 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
4177 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4178 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
4179 adv |= ADVERTISE_PAUSE_ASYM;
4180 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4181
4182 if (np->gigabit == PHY_GIGABIT) {
4183 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4184 adv &= ~ADVERTISE_1000FULL;
4185 if (ecmd->advertising & ADVERTISED_1000baseT_Full)
4186 adv |= ADVERTISE_1000FULL;
4187 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
4188 }
4189
4190 if (netif_running(dev))
4191 netdev_info(dev, "link down\n");
4192 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4193 if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
4194 bmcr |= BMCR_ANENABLE;
4195 /* reset the phy in order for settings to stick,
4196 * and cause autoneg to start */
4197 if (phy_reset(dev, bmcr)) {
4198 netdev_info(dev, "phy reset failed\n");
4199 return -EINVAL;
4200 }
4201 } else {
4202 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4203 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4204 }
4205 } else {
4206 int adv, bmcr;
4207
4208 np->autoneg = 0;
4209
4210 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4211 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4212 if (speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
4213 adv |= ADVERTISE_10HALF;
4214 if (speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
4215 adv |= ADVERTISE_10FULL;
4216 if (speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
4217 adv |= ADVERTISE_100HALF;
4218 if (speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
4219 adv |= ADVERTISE_100FULL;
4220 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
4221 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisements but disable tx pause */
4222 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4223 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
4224 }
4225 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
4226 adv |= ADVERTISE_PAUSE_ASYM;
4227 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
4228 }
4229 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4230 np->fixed_mode = adv;
4231
4232 if (np->gigabit == PHY_GIGABIT) {
4233 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4234 adv &= ~ADVERTISE_1000FULL;
4235 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
4236 }
4237
4238 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4239 bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
4240 if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
4241 bmcr |= BMCR_FULLDPLX;
4242 if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
4243 bmcr |= BMCR_SPEED100;
4244 if (np->phy_oui == PHY_OUI_MARVELL) {
4245 /* reset the phy in order for forced mode settings to stick */
4246 if (phy_reset(dev, bmcr)) {
4247 netdev_info(dev, "phy reset failed\n");
4248 return -EINVAL;
4249 }
4250 } else {
4251 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4252 if (netif_running(dev)) {
4253 /* Wait a bit and then reconfigure the nic. */
4254 udelay(10);
4255 nv_linkchange(dev);
4256 }
4257 }
4258 }
4259
4260 if (netif_running(dev)) {
4261 nv_start_rxtx(dev);
4262 nv_enable_irq(dev);
4263 }
4264
4265 return 0;
4266 }
4267
4268 #define FORCEDETH_REGS_VER 1
4269
4270 static int nv_get_regs_len(struct net_device *dev)
4271 {
4272 struct fe_priv *np = netdev_priv(dev);
4273 return np->register_size;
4274 }
4275
4276 static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
4277 {
4278 struct fe_priv *np = netdev_priv(dev);
4279 u8 __iomem *base = get_hwbase(dev);
4280 u32 *rbuf = buf;
4281 int i;
4282
4283 regs->version = FORCEDETH_REGS_VER;
4284 spin_lock_irq(&np->lock);
4285 for (i = 0; i < np->register_size/sizeof(u32); i++)
4286 rbuf[i] = readl(base + i*sizeof(u32));
4287 spin_unlock_irq(&np->lock);
4288 }
4289
4290 static int nv_nway_reset(struct net_device *dev)
4291 {
4292 struct fe_priv *np = netdev_priv(dev);
4293 int ret;
4294
4295 if (np->autoneg) {
4296 int bmcr;
4297
4298 netif_carrier_off(dev);
4299 if (netif_running(dev)) {
4300 nv_disable_irq(dev);
4301 netif_tx_lock_bh(dev);
4302 netif_addr_lock(dev);
4303 spin_lock(&np->lock);
4304 /* stop engines */
4305 nv_stop_rxtx(dev);
4306 spin_unlock(&np->lock);
4307 netif_addr_unlock(dev);
4308 netif_tx_unlock_bh(dev);
4309 netdev_info(dev, "link down\n");
4310 }
4311
4312 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4313 if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
4314 bmcr |= BMCR_ANENABLE;
4315 /* reset the phy in order for settings to stick*/
4316 if (phy_reset(dev, bmcr)) {
4317 netdev_info(dev, "phy reset failed\n");
4318 return -EINVAL;
4319 }
4320 } else {
4321 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4322 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4323 }
4324
4325 if (netif_running(dev)) {
4326 nv_start_rxtx(dev);
4327 nv_enable_irq(dev);
4328 }
4329 ret = 0;
4330 } else {
4331 ret = -EINVAL;
4332 }
4333
4334 return ret;
4335 }
4336
4337 static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
4338 {
4339 struct fe_priv *np = netdev_priv(dev);
4340
4341 ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
4342 ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
4343
4344 ring->rx_pending = np->rx_ring_size;
4345 ring->tx_pending = np->tx_ring_size;
4346 }
4347
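/*
 * Resize the descriptor rings.  New rings and skb maps are allocated
 * up front so the old ones are only torn down once the replacements
 * exist; on a running interface the engines are stopped around the
 * swap and restarted afterwards.
 */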
4348 static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
4349 {
4350 struct fe_priv *np = netdev_priv(dev);
4351 u8 __iomem *base = get_hwbase(dev);
4352 u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
4353 dma_addr_t ring_addr;
4354
4355 if (ring->rx_pending < RX_RING_MIN ||
4356 ring->tx_pending < TX_RING_MIN ||
4357 ring->rx_mini_pending != 0 ||
4358 ring->rx_jumbo_pending != 0 ||
4359 (np->desc_ver == DESC_VER_1 &&
4360 (ring->rx_pending > RING_MAX_DESC_VER_1 ||
4361 ring->tx_pending > RING_MAX_DESC_VER_1)) ||
4362 (np->desc_ver != DESC_VER_1 &&
4363 (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
4364 ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
4365 return -EINVAL;
4366 }
4367
4368 /* allocate new rings */
4369 if (!nv_optimized(np)) {
4370 rxtx_ring = pci_alloc_consistent(np->pci_dev,
4371 sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
4372 &ring_addr);
4373 } else {
4374 rxtx_ring = pci_alloc_consistent(np->pci_dev,
4375 sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
4376 &ring_addr);
4377 }
4378 rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL);
4379 tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
4380 if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
4381 /* fall back to old rings */
4382 if (!nv_optimized(np)) {
4383 if (rxtx_ring)
4384 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
4385 rxtx_ring, ring_addr);
4386 } else {
4387 if (rxtx_ring)
4388 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
4389 rxtx_ring, ring_addr);
4390 }
4391
4392 kfree(rx_skbuff);
4393 kfree(tx_skbuff);
4394 goto exit;
4395 }
4396
4397 if (netif_running(dev)) {
4398 nv_disable_irq(dev);
4399 nv_napi_disable(dev);
4400 netif_tx_lock_bh(dev);
4401 netif_addr_lock(dev);
4402 spin_lock(&np->lock);
4403 /* stop engines */
4404 nv_stop_rxtx(dev);
4405 nv_txrx_reset(dev);
4406 /* drain queues */
4407 nv_drain_rxtx(dev);
4408 /* delete queues */
4409 free_rings(dev);
4410 }
4411
4412 /* set new values */
4413 np->rx_ring_size = ring->rx_pending;
4414 np->tx_ring_size = ring->tx_pending;
4415
4416 if (!nv_optimized(np)) {
4417 np->rx_ring.orig = (struct ring_desc *)rxtx_ring;
4418 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
4419 } else {
4420 np->rx_ring.ex = (struct ring_desc_ex *)rxtx_ring;
4421 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
4422 }
4423 np->rx_skb = (struct nv_skb_map *)rx_skbuff;
4424 np->tx_skb = (struct nv_skb_map *)tx_skbuff;
4425 np->ring_addr = ring_addr;
4426
4427 memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
4428 memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);
4429
4430 if (netif_running(dev)) {
4431 /* reinit driver view of the queues */
4432 set_bufsize(dev);
4433 if (nv_init_ring(dev)) {
4434 if (!np->in_shutdown)
4435 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4436 }
4437
4438 /* reinit nic view of the queues */
4439 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4440 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4441 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4442 base + NvRegRingSizes);
4443 pci_push(base);
4444 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4445 pci_push(base);
4446
4447 /* restart engines */
4448 nv_start_rxtx(dev);
4449 spin_unlock(&np->lock);
4450 netif_addr_unlock(dev);
4451 netif_tx_unlock_bh(dev);
4452 nv_napi_enable(dev);
4453 nv_enable_irq(dev);
4454 }
4455 return 0;
4456 exit:
4457 return -ENOMEM;
4458 }
4459
4460 static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
4461 {
4462 struct fe_priv *np = netdev_priv(dev);
4463
4464 pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
4465 pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
4466 pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
4467 }
4468
4469 static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
4470 {
4471 struct fe_priv *np = netdev_priv(dev);
4472 int adv, bmcr;
4473
4474 if ((!np->autoneg && np->duplex == 0) ||
4475 (np->autoneg && !pause->autoneg && np->duplex == 0)) {
4476 netdev_info(dev, "cannot set pause parameters when forced link is in half duplex\n");
4477 return -EINVAL;
4478 }
4479 if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
4480 netdev_info(dev, "hardware does not support tx pause frames\n");
4481 return -EINVAL;
4482 }
4483
4484 netif_carrier_off(dev);
4485 if (netif_running(dev)) {
4486 nv_disable_irq(dev);
4487 netif_tx_lock_bh(dev);
4488 netif_addr_lock(dev);
4489 spin_lock(&np->lock);
4490 /* stop engines */
4491 nv_stop_rxtx(dev);
4492 spin_unlock(&np->lock);
4493 netif_addr_unlock(dev);
4494 netif_tx_unlock_bh(dev);
4495 }
4496
4497 np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
4498 if (pause->rx_pause)
4499 np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
4500 if (pause->tx_pause)
4501 np->pause_flags |= NV_PAUSEFRAME_TX_REQ;
4502
4503 if (np->autoneg && pause->autoneg) {
4504 np->pause_flags |= NV_PAUSEFRAME_AUTONEG;
4505
4506 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4507 adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4508 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
4509 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4510 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
4511 adv |= ADVERTISE_PAUSE_ASYM;
4512 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4513
4514 if (netif_running(dev))
4515 netdev_info(dev, "link down\n");
4516 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4517 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4518 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4519 } else {
4520 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
4521 if (pause->rx_pause)
4522 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
4523 if (pause->tx_pause)
4524 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
4525
4526 if (!netif_running(dev))
4527 nv_update_linkspeed(dev);
4528 else
4529 nv_update_pause(dev, np->pause_flags);
4530 }
4531
4532 if (netif_running(dev)) {
4533 nv_start_rxtx(dev);
4534 nv_enable_irq(dev);
4535 }
4536 return 0;
4537 }
4538
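/*
 * Toggle the PHY-internal loopback mode used by the NETIF_F_LOOPBACK
 * feature flag: loopback forces 1000 Mbps full duplex and a faked
 * carrier-on so traffic can flow without a link partner.
 */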
4539 static int nv_set_loopback(struct net_device *dev, netdev_features_t features)
4540 {
4541 struct fe_priv *np = netdev_priv(dev);
4542 unsigned long flags;
4543 u32 miicontrol;
4544 int err, retval = 0;
4545
4546 spin_lock_irqsave(&np->lock, flags);
4547 miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4548 if (features & NETIF_F_LOOPBACK) {
4549 if (miicontrol & BMCR_LOOPBACK) {
4550 spin_unlock_irqrestore(&np->lock, flags);
4551 netdev_info(dev, "Loopback already enabled\n");
4552 return 0;
4553 }
4554 nv_disable_irq(dev);
4555 /* Turn on loopback mode */
4556 miicontrol |= BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
4557 err = mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol);
4558 if (err) {
4559 retval = PHY_ERROR;
4560 spin_unlock_irqrestore(&np->lock, flags);
4561 phy_init(dev);
4562 } else {
4563 if (netif_running(dev)) {
4564 /* Force 1000 Mbps full-duplex */
4565 nv_force_linkspeed(dev, NVREG_LINKSPEED_1000,
4566 1);
4567 /* Force link up */
4568 netif_carrier_on(dev);
4569 }
4570 spin_unlock_irqrestore(&np->lock, flags);
4571 netdev_info(dev,
4572 "Internal PHY loopback mode enabled.\n");
4573 }
4574 } else {
4575 if (!(miicontrol & BMCR_LOOPBACK)) {
4576 spin_unlock_irqrestore(&np->lock, flags);
4577 netdev_info(dev, "Loopback already disabled\n");
4578 return 0;
4579 }
4580 nv_disable_irq(dev);
4581 /* Turn off loopback */
4582 spin_unlock_irqrestore(&np->lock, flags);
4583 netdev_info(dev, "Internal PHY loopback mode disabled.\n");
4584 phy_init(dev);
4585 }
4586 msleep(500);
4587 spin_lock_irqsave(&np->lock, flags);
4588 nv_enable_irq(dev);
4589 spin_unlock_irqrestore(&np->lock, flags);
4590
4591 return retval;
4592 }
4593
4594 static netdev_features_t nv_fix_features(struct net_device *dev,
4595 netdev_features_t features)
4596 {
4597 /* vlan is dependent on rx checksum offload */
4598 if (features & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX))
4599 features |= NETIF_F_RXCSUM;
4600
4601 return features;
4602 }
4603
4604 static void nv_vlan_mode(struct net_device *dev, netdev_features_t features)
4605 {
4606 struct fe_priv *np = get_nvpriv(dev);
4607
4608 spin_lock_irq(&np->lock);
4609
4610 if (features & NETIF_F_HW_VLAN_RX)
4611 np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP;
4612 else
4613 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
4614
4615 if (features & NETIF_F_HW_VLAN_TX)
4616 np->txrxctl_bits |= NVREG_TXRXCTL_VLANINS;
4617 else
4618 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
4619
4620 writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4621
4622 spin_unlock_irq(&np->lock);
4623 }
4624
4625 static int nv_set_features(struct net_device *dev, netdev_features_t features)
4626 {
4627 struct fe_priv *np = netdev_priv(dev);
4628 u8 __iomem *base = get_hwbase(dev);
4629 netdev_features_t changed = dev->features ^ features;
4630 int retval;
4631
4632 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev)) {
4633 retval = nv_set_loopback(dev, features);
4634 if (retval != 0)
4635 return retval;
4636 }
4637
4638 if (changed & NETIF_F_RXCSUM) {
4639 spin_lock_irq(&np->lock);
4640
4641 if (features & NETIF_F_RXCSUM)
4642 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
4643 else
4644 np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
4645
4646 if (netif_running(dev))
4647 writel(np->txrxctl_bits, base + NvRegTxRxControl);
4648
4649 spin_unlock_irq(&np->lock);
4650 }
4651
4652 if (changed & (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX))
4653 nv_vlan_mode(dev, features);
4654
4655 return 0;
4656 }
4657
4658 static int nv_get_sset_count(struct net_device *dev, int sset)
4659 {
4660 struct fe_priv *np = netdev_priv(dev);
4661
4662 switch (sset) {
4663 case ETH_SS_TEST:
4664 if (np->driver_data & DEV_HAS_TEST_EXTENDED)
4665 return NV_TEST_COUNT_EXTENDED;
4666 else
4667 return NV_TEST_COUNT_BASE;
4668 case ETH_SS_STATS:
4669 if (np->driver_data & DEV_HAS_STATISTICS_V3)
4670 return NV_DEV_STATISTICS_V3_COUNT;
4671 else if (np->driver_data & DEV_HAS_STATISTICS_V2)
4672 return NV_DEV_STATISTICS_V2_COUNT;
4673 else if (np->driver_data & DEV_HAS_STATISTICS_V1)
4674 return NV_DEV_STATISTICS_V1_COUNT;
4675 else
4676 return 0;
4677 default:
4678 return -EOPNOTSUPP;
4679 }
4680 }
4681
4682 static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
4683 {
4684 struct fe_priv *np = netdev_priv(dev);
4685
4686 /* update stats */
4687 nv_get_hw_stats(dev);
4688
4689 memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
4690 }
4691
4692 static int nv_link_test(struct net_device *dev)
4693 {
4694 struct fe_priv *np = netdev_priv(dev);
4695 int mii_status;
4696
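/* the link status bit in BMSR is latched; read the register twice
 * so the second read returns the current link state */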
4697 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
4698 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
4699
4700 /* check phy link status */
4701 if (!(mii_status & BMSR_LSTATUS))
4702 return 0;
4703 else
4704 return 1;
4705 }
4706
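/*
 * Walk the nv_registers_test table: XOR each register with its mask,
 * verify that the toggled bits read back, then restore the original
 * value.  Returns 1 on success, 0 on the first mismatch.
 */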
4707 static int nv_register_test(struct net_device *dev)
4708 {
4709 u8 __iomem *base = get_hwbase(dev);
4710 int i = 0;
4711 u32 orig_read, new_read;
4712
4713 do {
4714 orig_read = readl(base + nv_registers_test[i].reg);
4715
4716 /* xor with mask to toggle bits */
4717 orig_read ^= nv_registers_test[i].mask;
4718
4719 writel(orig_read, base + nv_registers_test[i].reg);
4720
4721 new_read = readl(base + nv_registers_test[i].reg);
4722
4723 if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
4724 return 0;
4725
4726 /* restore original value */
4727 orig_read ^= nv_registers_test[i].mask;
4728 writel(orig_read, base + nv_registers_test[i].reg);
4729
4730 } while (nv_registers_test[++i].reg != 0);
4731
4732 return 1;
4733 }
4734
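/*
 * Ethtool interrupt self-test: temporarily swap in nv_nic_irq_test on
 * a single vector, arm the hardware timer interrupt and check that
 * the handler ran.  Returns 1 on success, 2 if no interrupt was seen,
 * 0 if an irq could not be set up.
 */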
4735 static int nv_interrupt_test(struct net_device *dev)
4736 {
4737 struct fe_priv *np = netdev_priv(dev);
4738 u8 __iomem *base = get_hwbase(dev);
4739 int ret = 1;
4740 int testcnt;
4741 u32 save_msi_flags, save_poll_interval = 0;
4742
4743 if (netif_running(dev)) {
4744 /* free current irq */
4745 nv_free_irq(dev);
4746 save_poll_interval = readl(base+NvRegPollingInterval);
4747 }
4748
4749 /* flag to test interrupt handler */
4750 np->intr_test = 0;
4751
4752 /* setup test irq */
4753 save_msi_flags = np->msi_flags;
4754 np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
4755 np->msi_flags |= 0x001; /* setup 1 vector */
4756 if (nv_request_irq(dev, 1))
4757 return 0;
4758
4759 /* setup timer interrupt */
4760 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
4761 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
4762
4763 nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);
4764
4765 /* wait for at least one interrupt */
4766 msleep(100);
4767
4768 spin_lock_irq(&np->lock);
4769
4770 /* flag should be set within ISR */
4771 testcnt = np->intr_test;
4772 if (!testcnt)
4773 ret = 2;
4774
4775 nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
4776 if (!(np->msi_flags & NV_MSI_X_ENABLED))
4777 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
4778 else
4779 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
4780
4781 spin_unlock_irq(&np->lock);
4782
4783 nv_free_irq(dev);
4784
4785 np->msi_flags = save_msi_flags;
4786
4787 if (netif_running(dev)) {
4788 writel(save_poll_interval, base + NvRegPollingInterval);
4789 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
4790 /* restore original irq */
4791 if (nv_request_irq(dev, 0))
4792 return 0;
4793 }
4794
4795 return ret;
4796 }
4797
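/*
 * Ethtool loopback self-test: put the MAC into loopback via the
 * packet filter, transmit a single byte-pattern frame and verify it
 * comes back on the RX ring with the right length and payload.
 */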
4798 static int nv_loopback_test(struct net_device *dev)
4799 {
4800 struct fe_priv *np = netdev_priv(dev);
4801 u8 __iomem *base = get_hwbase(dev);
4802 struct sk_buff *tx_skb, *rx_skb;
4803 dma_addr_t test_dma_addr;
4804 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
4805 u32 flags;
4806 int len, i, pkt_len;
4807 u8 *pkt_data;
4808 u32 filter_flags = 0;
4809 u32 misc1_flags = 0;
4810 int ret = 1;
4811
4812 if (netif_running(dev)) {
4813 nv_disable_irq(dev);
4814 filter_flags = readl(base + NvRegPacketFilterFlags);
4815 misc1_flags = readl(base + NvRegMisc1);
4816 } else {
4817 nv_txrx_reset(dev);
4818 }
4819
4820 /* reinit driver view of the rx queue */
4821 set_bufsize(dev);
4822 nv_init_ring(dev);
4823
4824 /* setup hardware for loopback */
4825 writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
4826 writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);
4827
4828 /* reinit nic view of the rx queue */
4829 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4830 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4831 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4832 base + NvRegRingSizes);
4833 pci_push(base);
4834
4835 /* restart rx engine */
4836 nv_start_rxtx(dev);
4837
4838 /* setup packet for tx */
4839 pkt_len = ETH_DATA_LEN;
4840 tx_skb = dev_alloc_skb(pkt_len);
4841 if (!tx_skb) {
4842 netdev_err(dev, "dev_alloc_skb() failed during loopback test\n");
4843 ret = 0;
4844 goto out;
4845 }
4846 test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
4847 skb_tailroom(tx_skb),
4848 PCI_DMA_TODEVICE);
4849 pkt_data = skb_put(tx_skb, pkt_len);
4850 for (i = 0; i < pkt_len; i++)
4851 pkt_data[i] = (u8)(i & 0xff);
4852
4853 if (!nv_optimized(np)) {
4854 np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
4855 np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
4856 } else {
4857 np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr));
4858 np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr));
4859 np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
4860 }
4861 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4862 pci_push(get_hwbase(dev));
4863
4864 msleep(500);
4865
4866 /* check for rx of the packet */
4867 if (!nv_optimized(np)) {
4868 flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
4869 len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
4870
4871 } else {
4872 flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
4873 len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
4874 }
4875
4876 if (flags & NV_RX_AVAIL) {
4877 ret = 0;
4878 } else if (np->desc_ver == DESC_VER_1) {
4879 if (flags & NV_RX_ERROR)
4880 ret = 0;
4881 } else {
4882 if (flags & NV_RX2_ERROR)
4883 ret = 0;
4884 }
4885
4886 if (ret) {
4887 if (len != pkt_len) {
4888 ret = 0;
4889 } else {
4890 rx_skb = np->rx_skb[0].skb;
4891 for (i = 0; i < pkt_len; i++) {
4892 if (rx_skb->data[i] != (u8)(i & 0xff)) {
4893 ret = 0;
4894 break;
4895 }
4896 }
4897 }
4898 }
4899
4900 pci_unmap_single(np->pci_dev, test_dma_addr,
4901 (skb_end_pointer(tx_skb) - tx_skb->data),
4902 PCI_DMA_TODEVICE);
4903 dev_kfree_skb_any(tx_skb);
4904 out:
4905 /* stop engines */
4906 nv_stop_rxtx(dev);
4907 nv_txrx_reset(dev);
4908 /* drain rx queue */
4909 nv_drain_rxtx(dev);
4910
4911 if (netif_running(dev)) {
4912 writel(misc1_flags, base + NvRegMisc1);
4913 writel(filter_flags, base + NvRegPacketFilterFlags);
4914 nv_enable_irq(dev);
4915 }
4916
4917 return ret;
4918 }
4919
4920 static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
4921 {
4922 struct fe_priv *np = netdev_priv(dev);
4923 u8 __iomem *base = get_hwbase(dev);
4924 int result;
4925 memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64));
4926
4927 if (!nv_link_test(dev)) {
4928 test->flags |= ETH_TEST_FL_FAILED;
4929 buffer[0] = 1;
4930 }
4931
4932 if (test->flags & ETH_TEST_FL_OFFLINE) {
4933 if (netif_running(dev)) {
4934 netif_stop_queue(dev);
4935 nv_napi_disable(dev);
4936 netif_tx_lock_bh(dev);
4937 netif_addr_lock(dev);
4938 spin_lock_irq(&np->lock);
4939 nv_disable_hw_interrupts(dev, np->irqmask);
4940 if (!(np->msi_flags & NV_MSI_X_ENABLED))
4941 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
4942 else
4943 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
4944 /* stop engines */
4945 nv_stop_rxtx(dev);
4946 nv_txrx_reset(dev);
4947 /* drain rx queue */
4948 nv_drain_rxtx(dev);
4949 spin_unlock_irq(&np->lock);
4950 netif_addr_unlock(dev);
4951 netif_tx_unlock_bh(dev);
4952 }
4953
4954 if (!nv_register_test(dev)) {
4955 test->flags |= ETH_TEST_FL_FAILED;
4956 buffer[1] = 1;
4957 }
4958
4959 result = nv_interrupt_test(dev);
4960 if (result != 1) {
4961 test->flags |= ETH_TEST_FL_FAILED;
4962 buffer[2] = 1;
4963 }
4964 if (result == 0) {
4965 /* bail out */
4966 return;
4967 }
4968
4969 if (!nv_loopback_test(dev)) {
4970 test->flags |= ETH_TEST_FL_FAILED;
4971 buffer[3] = 1;
4972 }
4973
4974 if (netif_running(dev)) {
4975 /* reinit driver view of the rx queue */
4976 set_bufsize(dev);
4977 if (nv_init_ring(dev)) {
4978 if (!np->in_shutdown)
4979 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4980 }
4981 /* reinit nic view of the rx queue */
4982 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4983 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4984 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4985 base + NvRegRingSizes);
4986 pci_push(base);
4987 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4988 pci_push(base);
4989 /* restart rx engine */
4990 nv_start_rxtx(dev);
4991 netif_start_queue(dev);
4992 nv_napi_enable(dev);
4993 nv_enable_hw_interrupts(dev, np->irqmask);
4994 }
4995 }
4996 }
4997
4998 static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
4999 {
5000 switch (stringset) {
5001 case ETH_SS_STATS:
5002 memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str));
5003 break;
5004 case ETH_SS_TEST:
5005 memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str));
5006 break;
5007 }
5008 }
5009
5010 static const struct ethtool_ops ops = {
5011 .get_drvinfo = nv_get_drvinfo,
5012 .get_link = ethtool_op_get_link,
5013 .get_wol = nv_get_wol,
5014 .set_wol = nv_set_wol,
5015 .get_settings = nv_get_settings,
5016 .set_settings = nv_set_settings,
5017 .get_regs_len = nv_get_regs_len,
5018 .get_regs = nv_get_regs,
5019 .nway_reset = nv_nway_reset,
5020 .get_ringparam = nv_get_ringparam,
5021 .set_ringparam = nv_set_ringparam,
5022 .get_pauseparam = nv_get_pauseparam,
5023 .set_pauseparam = nv_set_pauseparam,
5024 .get_strings = nv_get_strings,
5025 .get_ethtool_stats = nv_get_ethtool_stats,
5026 .get_sset_count = nv_get_sset_count,
5027 .self_test = nv_self_test,
5028 };
5029
5030 /* The mgmt unit and driver use a semaphore to access the phy during init */
5031 static int nv_mgmt_acquire_sema(struct net_device *dev)
5032 {
5033 struct fe_priv *np = netdev_priv(dev);
5034 u8 __iomem *base = get_hwbase(dev);
5035 int i;
5036 u32 tx_ctrl, mgmt_sema;
5037
5038 for (i = 0; i < 10; i++) {
5039 mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK;
5040 if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE)
5041 break;
5042 msleep(500);
5043 }
5044
5045 if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE)
5046 return 0;
5047
5048 for (i = 0; i < 2; i++) {
5049 tx_ctrl = readl(base + NvRegTransmitterControl);
5050 tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
5051 writel(tx_ctrl, base + NvRegTransmitterControl);
5052
5053 /* verify that semaphore was acquired */
5054 tx_ctrl = readl(base + NvRegTransmitterControl);
5055 if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) &&
5056 ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) {
5057 np->mgmt_sema = 1;
5058 return 1;
5059 } else
5060 udelay(50);
5061 }
5062
5063 return 0;
5064 }
5065
5066 static void nv_mgmt_release_sema(struct net_device *dev)
5067 {
5068 struct fe_priv *np = netdev_priv(dev);
5069 u8 __iomem *base = get_hwbase(dev);
5070 u32 tx_ctrl;
5071
5072 if (np->driver_data & DEV_HAS_MGMT_UNIT) {
5073 if (np->mgmt_sema) {
5074 tx_ctrl = readl(base + NvRegTransmitterControl);
5075 tx_ctrl &= ~NVREG_XMITCTL_HOST_SEMA_ACQ;
5076 writel(tx_ctrl, base + NvRegTransmitterControl);
5077 }
5078 }
5079 }
5080
5081
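/*
 * Query the management unit firmware version: kick the request by
 * toggling the DATA_START bit and wait (up to 5 seconds) for the
 * DATA_READY bit to toggle in reply.
 */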
5082 static int nv_mgmt_get_version(struct net_device *dev)
5083 {
5084 struct fe_priv *np = netdev_priv(dev);
5085 u8 __iomem *base = get_hwbase(dev);
5086 u32 data_ready = readl(base + NvRegTransmitterControl);
5087 u32 data_ready2 = 0;
5088 unsigned long start;
5089 int ready = 0;
5090
5091 writel(NVREG_MGMTUNITGETVERSION, base + NvRegMgmtUnitGetVersion);
5092 writel(data_ready ^ NVREG_XMITCTL_DATA_START, base + NvRegTransmitterControl);
5093 start = jiffies;
5094 while (time_before(jiffies, start + 5*HZ)) {
5095 data_ready2 = readl(base + NvRegTransmitterControl);
5096 if ((data_ready & NVREG_XMITCTL_DATA_READY) != (data_ready2 & NVREG_XMITCTL_DATA_READY)) {
5097 ready = 1;
5098 break;
5099 }
5100 schedule_timeout_uninterruptible(1);
5101 }
5102
5103 if (!ready || (data_ready2 & NVREG_XMITCTL_DATA_ERROR))
5104 return 0;
5105
5106 np->mgmt_version = readl(base + NvRegMgmtUnitVersion) & NVREG_MGMTUNITVERSION;
5107
5108 return 1;
5109 }
5110
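/*
 * ndo_open: power up the PHY, wipe any previous MAC configuration,
 * allocate and publish the descriptor rings, program the MAC/PHY
 * registers, request the interrupt(s) and start the rx/tx engines.
 */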
5111 static int nv_open(struct net_device *dev)
5112 {
5113 struct fe_priv *np = netdev_priv(dev);
5114 u8 __iomem *base = get_hwbase(dev);
5115 int ret = 1;
5116 int oom, i;
5117 u32 low;
5118
5119 /* power up phy */
5120 mii_rw(dev, np->phyaddr, MII_BMCR,
5121 mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN);
5122
5123 nv_txrx_gate(dev, false);
5124 /* erase previous misconfiguration */
5125 if (np->driver_data & DEV_HAS_POWER_CNTRL)
5126 nv_mac_reset(dev);
5127 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
5128 writel(0, base + NvRegMulticastAddrB);
5129 writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
5130 writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
5131 writel(0, base + NvRegPacketFilterFlags);
5132
5133 writel(0, base + NvRegTransmitterControl);
5134 writel(0, base + NvRegReceiverControl);
5135
5136 writel(0, base + NvRegAdapterControl);
5137
5138 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
5139 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
5140
5141 /* initialize descriptor rings */
5142 set_bufsize(dev);
5143 oom = nv_init_ring(dev);
5144
5145 writel(0, base + NvRegLinkSpeed);
5146 writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
5147 nv_txrx_reset(dev);
5148 writel(0, base + NvRegUnknownSetupReg6);
5149
5150 np->in_shutdown = 0;
5151
5152 /* give hw rings */
5153 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
5154 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5155 base + NvRegRingSizes);
5156
5157 writel(np->linkspeed, base + NvRegLinkSpeed);
5158 if (np->desc_ver == DESC_VER_1)
5159 writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
5160 else
5161 writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark);
5162 writel(np->txrxctl_bits, base + NvRegTxRxControl);
5163 writel(np->vlanctl_bits, base + NvRegVlanControl);
5164 pci_push(base);
5165 writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
5166 if (reg_delay(dev, NvRegUnknownSetupReg5,
5167 NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
5168 NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX))
5169 netdev_info(dev,
5170 "%s: SetupReg5, Bit 31 remained off\n", __func__);
5171
5172 writel(0, base + NvRegMIIMask);
5173 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5174 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5175
5176 writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
5177 writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
5178 writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
5179 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
5180
5181 writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
5182
5183 get_random_bytes(&low, sizeof(low));
5184 low &= NVREG_SLOTTIME_MASK;
5185 if (np->desc_ver == DESC_VER_1) {
5186 writel(low|NVREG_SLOTTIME_DEFAULT, base + NvRegSlotTime);
5187 } else {
5188 if (!(np->driver_data & DEV_HAS_GEAR_MODE)) {
5189 /* setup legacy backoff */
5190 writel(NVREG_SLOTTIME_LEGBF_ENABLED|NVREG_SLOTTIME_10_100_FULL|low, base + NvRegSlotTime);
5191 } else {
5192 writel(NVREG_SLOTTIME_10_100_FULL, base + NvRegSlotTime);
5193 nv_gear_backoff_reseed(dev);
5194 }
5195 }
5196 writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral);
5197 writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral);
5198 if (poll_interval == -1) {
5199 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
5200 writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
5201 else
5202 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
5203 } else
5204 writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
5205 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
5206 writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
5207 base + NvRegAdapterControl);
5208 writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
5209 writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask);
5210 if (np->wolenabled)
5211 writel(NVREG_WAKEUPFLAGS_ENABLE , base + NvRegWakeUpFlags);
5212
5213 i = readl(base + NvRegPowerState);
5214 if ((i & NVREG_POWERSTATE_POWEREDUP) == 0)
5215 writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);
5216
5217 pci_push(base);
5218 udelay(10);
5219 writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
5220
5221 nv_disable_hw_interrupts(dev, np->irqmask);
5222 pci_push(base);
5223 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5224 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5225 pci_push(base);
5226
5227 if (nv_request_irq(dev, 0))
5228 goto out_drain;
5229
5230 /* ask for interrupts */
5231 nv_enable_hw_interrupts(dev, np->irqmask);
5232
5233 spin_lock_irq(&np->lock);
5234 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
5235 writel(0, base + NvRegMulticastAddrB);
5236 writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
5237 writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
5238 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
5239 /* One manual link speed update: Interrupts are enabled, future link
5240 * speed changes cause interrupts and are handled by nv_link_irq().
5241 */
5242 {
5243 u32 miistat;
5244 miistat = readl(base + NvRegMIIStatus);
5245 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5246 }
5247 /* set linkspeed to an invalid value, forcing nv_update_linkspeed
5248 * to reinitialize the hw */
5249 np->linkspeed = 0;
5250 ret = nv_update_linkspeed(dev);
5251 nv_start_rxtx(dev);
5252 netif_start_queue(dev);
5253 nv_napi_enable(dev);
5254
5255 if (ret) {
5256 netif_carrier_on(dev);
5257 } else {
5258 netdev_info(dev, "no link during initialization\n");
5259 netif_carrier_off(dev);
5260 }
5261 if (oom)
5262 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
5263
5264 /* start statistics timer */
5265 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
5266 mod_timer(&np->stats_poll,
5267 round_jiffies(jiffies + STATS_INTERVAL));
5268
5269 spin_unlock_irq(&np->lock);
5270
5271 /* If the loopback feature was set while the device was down, make sure
5272 * that it's set correctly now.
5273 */
5274 if (dev->features & NETIF_F_LOOPBACK)
5275 nv_set_loopback(dev, dev->features);
5276
5277 return 0;
5278 out_drain:
5279 nv_drain_rxtx(dev);
5280 return ret;
5281 }
5282
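/*
 * ndo_stop: the reverse of nv_open.  Quiesce NAPI and the timers,
 * stop the engines, mask interrupts and drain the rings.  If WoL is
 * enabled the receiver is left running so magic packets can wake us.
 */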
5283 static int nv_close(struct net_device *dev)
5284 {
5285 struct fe_priv *np = netdev_priv(dev);
5286 u8 __iomem *base;
5287
5288 spin_lock_irq(&np->lock);
5289 np->in_shutdown = 1;
5290 spin_unlock_irq(&np->lock);
5291 nv_napi_disable(dev);
5292 synchronize_irq(np->pci_dev->irq);
5293
5294 del_timer_sync(&np->oom_kick);
5295 del_timer_sync(&np->nic_poll);
5296 del_timer_sync(&np->stats_poll);
5297
5298 netif_stop_queue(dev);
5299 spin_lock_irq(&np->lock);
5300 nv_stop_rxtx(dev);
5301 nv_txrx_reset(dev);
5302
5303 /* disable interrupts on the nic or we will lock up */
5304 base = get_hwbase(dev);
5305 nv_disable_hw_interrupts(dev, np->irqmask);
5306 pci_push(base);
5307
5308 spin_unlock_irq(&np->lock);
5309
5310 nv_free_irq(dev);
5311
5312 nv_drain_rxtx(dev);
5313
5314 if (np->wolenabled || !phy_power_down) {
5315 nv_txrx_gate(dev, false);
5316 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
5317 nv_start_rx(dev);
5318 } else {
5319 /* power down phy */
5320 mii_rw(dev, np->phyaddr, MII_BMCR,
5321 mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ)|BMCR_PDOWN);
5322 nv_txrx_gate(dev, true);
5323 }
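/*
 * Editor's note on the phy power-down above: the nested mii_rw() call
 * is a read-modify-write of the PHY's basic mode control register.
 * A minimal sketch with the steps unrolled (illustration only, using
 * the mii_rw() helper defined earlier in this file):
 */
#if 0
		{
			int bmcr;

			bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);	/* read current BMCR */
			bmcr |= BMCR_PDOWN;					/* request phy power-down */
			mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);		/* write it back */
		}
#endif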
5324
5325 /* FIXME: power down nic */
5326
5327 return 0;
5328 }
5329
5330 static const struct net_device_ops nv_netdev_ops = {
5331 .ndo_open = nv_open,
5332 .ndo_stop = nv_close,
5333 .ndo_get_stats = nv_get_stats,
5334 .ndo_start_xmit = nv_start_xmit,
5335 .ndo_tx_timeout = nv_tx_timeout,
5336 .ndo_change_mtu = nv_change_mtu,
5337 .ndo_fix_features = nv_fix_features,
5338 .ndo_set_features = nv_set_features,
5339 .ndo_validate_addr = eth_validate_addr,
5340 .ndo_set_mac_address = nv_set_mac_address,
5341 .ndo_set_rx_mode = nv_set_multicast,
5342 #ifdef CONFIG_NET_POLL_CONTROLLER
5343 .ndo_poll_controller = nv_poll_controller,
5344 #endif
5345 };
5346
5347 static const struct net_device_ops nv_netdev_ops_optimized = {
5348 .ndo_open = nv_open,
5349 .ndo_stop = nv_close,
5350 .ndo_get_stats = nv_get_stats,
5351 .ndo_start_xmit = nv_start_xmit_optimized,
5352 .ndo_tx_timeout = nv_tx_timeout,
5353 .ndo_change_mtu = nv_change_mtu,
5354 .ndo_fix_features = nv_fix_features,
5355 .ndo_set_features = nv_set_features,
5356 .ndo_validate_addr = eth_validate_addr,
5357 .ndo_set_mac_address = nv_set_mac_address,
5358 .ndo_set_rx_mode = nv_set_multicast,
5359 #ifdef CONFIG_NET_POLL_CONTROLLER
5360 .ndo_poll_controller = nv_poll_controller,
5361 #endif
5362 };
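/*
 * Editor's note: the two net_device_ops tables above are identical
 * except for .ndo_start_xmit (nv_start_xmit vs nv_start_xmit_optimized);
 * nv_probe picks one based on nv_optimized(), i.e. on whether the chip
 * uses the extended DESC_VER_3 descriptor format.
 */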
5363
5364 static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
5365 {
5366 struct net_device *dev;
5367 struct fe_priv *np;
5368 unsigned long addr;
5369 u8 __iomem *base;
5370 int err, i;
5371 u32 powerstate, txreg;
5372 u32 phystate_orig = 0, phystate;
5373 int phyinitialized = 0;
5374 static int printed_version;
5375
5376 if (!printed_version++)
5377 pr_info("Reverse Engineered nForce ethernet driver. Version %s.\n",
5378 FORCEDETH_VERSION);
5379
5380 dev = alloc_etherdev(sizeof(struct fe_priv));
5381 err = -ENOMEM;
5382 if (!dev)
5383 goto out;
5384
5385 np = netdev_priv(dev);
5386 np->dev = dev;
5387 np->pci_dev = pci_dev;
5388 spin_lock_init(&np->lock);
5389 SET_NETDEV_DEV(dev, &pci_dev->dev);
5390
5391 init_timer(&np->oom_kick);
5392 np->oom_kick.data = (unsigned long) dev;
5393 np->oom_kick.function = nv_do_rx_refill; /* timer handler */
5394 init_timer(&np->nic_poll);
5395 np->nic_poll.data = (unsigned long) dev;
5396 np->nic_poll.function = nv_do_nic_poll; /* timer handler */
5397 init_timer(&np->stats_poll);
5398 np->stats_poll.data = (unsigned long) dev;
5399 np->stats_poll.function = nv_do_stats_poll; /* timer handler */
5400
5401 err = pci_enable_device(pci_dev);
5402 if (err)
5403 goto out_free;
5404
5405 pci_set_master(pci_dev);
5406
5407 err = pci_request_regions(pci_dev, DRV_NAME);
5408 if (err < 0)
5409 goto out_disable;
5410
5411 if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
5412 np->register_size = NV_PCI_REGSZ_VER3;
5413 else if (id->driver_data & DEV_HAS_STATISTICS_V1)
5414 np->register_size = NV_PCI_REGSZ_VER2;
5415 else
5416 np->register_size = NV_PCI_REGSZ_VER1;
5417
5418 err = -EINVAL;
5419 addr = 0;
5420 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
5421 if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
5422 pci_resource_len(pci_dev, i) >= np->register_size) {
5423 addr = pci_resource_start(pci_dev, i);
5424 break;
5425 }
5426 }
5427 if (i == DEVICE_COUNT_RESOURCE) {
5428 dev_info(&pci_dev->dev, "Couldn't find register window\n");
5429 goto out_relreg;
5430 }
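/*
 * Editor's illustration of the BAR scan above: the probe takes the
 * first memory resource large enough to hold the register window.
 * A hedged sketch of the same test as a standalone helper
 * (hypothetical, not part of the driver):
 */
#if 0
static unsigned long nv_find_register_bar(struct pci_dev *pdev, int min_size)
{
	int bar;

	for (bar = 0; bar < DEVICE_COUNT_RESOURCE; bar++) {
		/* must be memory-mapped and big enough for the window */
		if ((pci_resource_flags(pdev, bar) & IORESOURCE_MEM) &&
		    pci_resource_len(pdev, bar) >= min_size)
			return pci_resource_start(pdev, bar);
	}
	return 0;	/* no suitable BAR found */
}
#endif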
5431
5432 /* copy of driver data */
5433 np->driver_data = id->driver_data;
5434 /* copy of device id */
5435 np->device_id = id->device;
5436
5437 /* handle different descriptor versions */
5438 if (id->driver_data & DEV_HAS_HIGH_DMA) {
5439 /* packet format 3: supports 40-bit addressing */
5440 np->desc_ver = DESC_VER_3;
5441 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
5442 if (dma_64bit) {
5443 if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(39)))
5444 dev_info(&pci_dev->dev,
5445 "64-bit DMA failed, using 32-bit addressing\n");
5446 else
5447 dev->features |= NETIF_F_HIGHDMA;
5448 if (pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(39))) {
5449 dev_info(&pci_dev->dev,
5450 "64-bit DMA (consistent) failed, using 32-bit ring buffers\n");
5451 }
5452 }
5453 } else if (id->driver_data & DEV_HAS_LARGEDESC) {
5454 /* packet format 2: supports jumbo frames */
5455 np->desc_ver = DESC_VER_2;
5456 np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
5457 } else {
5458 /* original packet format */
5459 np->desc_ver = DESC_VER_1;
5460 np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
5461 }
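/*
 * Editor's sketch for the DEV_HAS_HIGH_DMA path above: with packet
 * format 3 each buffer address is split across the two buffer words
 * of a ring_desc_ex (an assumption based on the 64-bit descriptor
 * layout used elsewhere in this driver), which is why a >32-bit DMA
 * mask is only requested for DESC_VER_3 hardware:
 */
#if 0
	{
		dma_addr_t buf = 0x12345678aULL;	/* hypothetical 36-bit buffer address */

		np->tx_ring.ex[0].bufhigh = cpu_to_le32((u64)buf >> 32);	/* upper bits */
		np->tx_ring.ex[0].buflow = cpu_to_le32((u32)buf);		/* lower 32 bits */
	}
#endif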
5462
5463 np->pkt_limit = NV_PKTLIMIT_1;
5464 if (id->driver_data & DEV_HAS_LARGEDESC)
5465 np->pkt_limit = NV_PKTLIMIT_2;
5466
5467 if (id->driver_data & DEV_HAS_CHECKSUM) {
5468 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
5469 dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_SG |
5470 NETIF_F_TSO | NETIF_F_RXCSUM;
5471 }
5472
5473 np->vlanctl_bits = 0;
5474 if (id->driver_data & DEV_HAS_VLAN) {
5475 np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
5476 dev->hw_features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
5477 }
5478
5479 dev->features |= dev->hw_features;
5480
5481 /* Add loopback capability to the device. */
5482 dev->hw_features |= NETIF_F_LOOPBACK;
5483
5484 np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
5485 if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
5486 (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
5487 (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)) {
5488 np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
5489 }
5490
5491 err = -ENOMEM;
5492 np->base = ioremap(addr, np->register_size);
5493 if (!np->base)
5494 goto out_relreg;
5495 dev->base_addr = (unsigned long)np->base;
5496
5497 dev->irq = pci_dev->irq;
5498
5499 np->rx_ring_size = RX_RING_DEFAULT;
5500 np->tx_ring_size = TX_RING_DEFAULT;
5501
5502 if (!nv_optimized(np)) {
5503 np->rx_ring.orig = pci_alloc_consistent(pci_dev,
5504 sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
5505 &np->ring_addr);
5506 if (!np->rx_ring.orig)
5507 goto out_unmap;
5508 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
5509 } else {
5510 np->rx_ring.ex = pci_alloc_consistent(pci_dev,
5511 sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
5512 &np->ring_addr);
5513 if (!np->rx_ring.ex)
5514 goto out_unmap;
5515 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
5516 }
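/*
 * Editor's note on the allocation above: the rx and tx rings share a
 * single coherent DMA block, rx descriptors first, so the tx ring's
 * bus address can be derived from the rx base (a sketch, assuming the
 * legacy ring_desc layout of the non-optimized path):
 */
#if 0
	{
		dma_addr_t tx_ring_bus = np->ring_addr +
			np->rx_ring_size * sizeof(struct ring_desc);	/* tx follows rx */
	}
#endif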
5517 np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
5518 np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
5519 if (!np->rx_skb || !np->tx_skb)
5520 goto out_freering;
5521
5522 if (!nv_optimized(np))
5523 dev->netdev_ops = &nv_netdev_ops;
5524 else
5525 dev->netdev_ops = &nv_netdev_ops_optimized;
5526
5527 netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
5528 SET_ETHTOOL_OPS(dev, &ops);
5529 dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
5530
5531 pci_set_drvdata(pci_dev, dev);
5532
5533 /* read the mac address */
5534 base = get_hwbase(dev);
5535 np->orig_mac[0] = readl(base + NvRegMacAddrA);
5536 np->orig_mac[1] = readl(base + NvRegMacAddrB);
5537
5538 /* check the workaround bit for correct mac address order */
5539 txreg = readl(base + NvRegTransmitPoll);
5540 if (id->driver_data & DEV_HAS_CORRECT_MACADDR) {
5541 /* mac address is already in correct order */
5542 dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
5543 dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
5544 dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
5545 dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
5546 dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
5547 dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
5548 } else if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
5549 /* mac address is already in correct order (workaround bit is set) */
5550 dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
5551 dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
5552 dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
5553 dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
5554 dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
5555 dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
5556 /*
5557 * Set orig mac address back to the reversed version.
5558 * This flag will be cleared during low power transition.
5559 * Therefore, we should always put back the reversed address.
5560 */
5561 np->orig_mac[0] = (dev->dev_addr[5] << 0) + (dev->dev_addr[4] << 8) +
5562 (dev->dev_addr[3] << 16) + (dev->dev_addr[2] << 24);
5563 np->orig_mac[1] = (dev->dev_addr[1] << 0) + (dev->dev_addr[0] << 8);
5564 } else {
5565 /* need to reverse mac address to correct order */
5566 dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
5567 dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
5568 dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
5569 dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
5570 dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
5571 dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
5572 writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
5573 dev_dbg(&pci_dev->dev,
5574 "%s: set workaround bit for reversed mac addr\n",
5575 __func__);
5576 }
5577 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
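/*
 * Editor's worked example for the "correct order" decoding above,
 * with hypothetical register contents: if NvRegMacAddrA reads
 * 0x67452301 and NvRegMacAddrB reads 0x0000ab89, the shifts yield
 * the address 01:23:45:67:89:ab - bytes 0-3 come from MacAddrA
 * (least significant byte first), bytes 4-5 from MacAddrB:
 */
#if 0
	dev->dev_addr[0] = (0x67452301 >> 0) & 0xff;	/* 0x01 */
	dev->dev_addr[3] = (0x67452301 >> 24) & 0xff;	/* 0x67 */
	dev->dev_addr[4] = (0x0000ab89 >> 0) & 0xff;	/* 0x89 */
	dev->dev_addr[5] = (0x0000ab89 >> 8) & 0xff;	/* 0xab */
#endif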
5578
5579 if (!is_valid_ether_addr(dev->perm_addr)) {
5580 /*
5581 * Bad mac address. At least one bios sets the mac address
5582 * to 01:23:45:67:89:ab
5583 */
5584 dev_err(&pci_dev->dev,
5585 "Invalid MAC address detected: %pM - Please complain to your hardware vendor.\n",
5586 dev->dev_addr);
5587 random_ether_addr(dev->dev_addr);
5588 dev_err(&pci_dev->dev,
5589 "Using random MAC address: %pM\n", dev->dev_addr);
5590 }
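/*
 * Editor's note: random_ether_addr() fills dev_addr with random bytes
 * and then fixes up the first octet, so the fallback address is always
 * unicast and locally administered. The invariants, spelled out
 * (illustration only):
 */
#if 0
	WARN_ON(dev->dev_addr[0] & 0x01);	/* multicast bit must be clear */
	WARN_ON(!(dev->dev_addr[0] & 0x02));	/* locally-administered bit must be set */
#endif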
5591
5592 /* set mac address */
5593 nv_copy_mac_to_hw(dev);
5594
5595 /* disable WOL */
5596 writel(0, base + NvRegWakeUpFlags);
5597 np->wolenabled = 0;
5598 device_set_wakeup_enable(&pci_dev->dev, false);
5599
5600 if (id->driver_data & DEV_HAS_POWER_CNTRL) {
5601
5602 /* take phy and nic out of low power mode */
5603 powerstate = readl(base + NvRegPowerState2);
5604 powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
5605 if ((id->driver_data & DEV_NEED_LOW_POWER_FIX) &&
5606 pci_dev->revision >= 0xA3)
5607 powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
5608 writel(powerstate, base + NvRegPowerState2);
5609 }
5610
5611 if (np->desc_ver == DESC_VER_1)
5612 np->tx_flags = NV_TX_VALID;
5613 else
5614 np->tx_flags = NV_TX2_VALID;
5615
5616 np->msi_flags = 0;
5617 if ((id->driver_data & DEV_HAS_MSI) && msi)
5618 np->msi_flags |= NV_MSI_CAPABLE;
5619
5620 if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
5621 /* msi-x has had reported issues when modifying the irqmask,
5622 as in the case of napi; therefore, disable it for now
5623 */
5624 #if 0
5625 np->msi_flags |= NV_MSI_X_CAPABLE;
5626 #endif
5627 }
5628
5629 if (optimization_mode == NV_OPTIMIZATION_MODE_CPU) {
5630 np->irqmask = NVREG_IRQMASK_CPU;
5631 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
5632 np->msi_flags |= 0x0001;
5633 } else if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC &&
5634 !(id->driver_data & DEV_NEED_TIMERIRQ)) {
5635 /* start off in throughput mode */
5636 np->irqmask = NVREG_IRQMASK_THROUGHPUT;
5637 /* remove support for msix mode */
5638 np->msi_flags &= ~NV_MSI_X_CAPABLE;
5639 } else {
5640 optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
5641 np->irqmask = NVREG_IRQMASK_THROUGHPUT;
5642 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
5643 np->msi_flags |= 0x0003;
5644 }
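/*
 * Editor's note on the msi_flags values above: the NV_MSI*_CAPABLE
 * bits record what the hardware supports, while the low bits (0x0001
 * or 0x0003 above) request the number of MSI-X vectors - one shared
 * vector in CPU mode, separate rx/tx/other vectors in throughput
 * mode. A sketch of extracting the count (assuming the
 * low-bits-as-count convention used here):
 */
#if 0
	{
		int nr_vectors = np->msi_flags & 0x000f;	/* requested vector count */
	}
#endif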
5645
5646 if (id->driver_data & DEV_NEED_TIMERIRQ)
5647 np->irqmask |= NVREG_IRQ_TIMER;
5648 if (id->driver_data & DEV_NEED_LINKTIMER) {
5649 np->need_linktimer = 1;
5650 np->link_timeout = jiffies + LINK_TIMEOUT;
5651 } else {
5652 np->need_linktimer = 0;
5653 }
5654
5655 /* Limit the number of outstanding tx descriptors to work around a hw bug */
5656 if (id->driver_data & DEV_NEED_TX_LIMIT) {
5657 np->tx_limit = 1;
5658 if (((id->driver_data & DEV_NEED_TX_LIMIT2) == DEV_NEED_TX_LIMIT2) &&
5659 pci_dev->revision >= 0xA2)
5660 np->tx_limit = 0;
5661 }
5662
5663 /* clear phy state and temporarily halt phy interrupts */
5664 writel(0, base + NvRegMIIMask);
5665 phystate = readl(base + NvRegAdapterControl);
5666 if (phystate & NVREG_ADAPTCTL_RUNNING) {
5667 phystate_orig = 1;
5668 phystate &= ~NVREG_ADAPTCTL_RUNNING;
5669 writel(phystate, base + NvRegAdapterControl);
5670 }
5671 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5672
5673 if (id->driver_data & DEV_HAS_MGMT_UNIT) {
5674 /* management unit running on the mac? */
5675 if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST) &&
5676 (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) &&
5677 nv_mgmt_acquire_sema(dev) &&
5678 nv_mgmt_get_version(dev)) {
5679 np->mac_in_use = 1;
5680 if (np->mgmt_version > 0)
5681 np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE;
5682 /* has the management unit already set up the phy? */
5683 if (np->mac_in_use &&
5684 ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
5685 NVREG_XMITCTL_SYNC_PHY_INIT)) {
5686 /* phy is inited by mgmt unit */
5687 phyinitialized = 1;
5688 } else {
5689 /* we need to init the phy */
5690 }
5691 }
5692 }
5693
5694 /* find a suitable phy */
5695 for (i = 1; i <= 32; i++) {
5696 int id1, id2;
5697 int phyaddr = i & 0x1F;
5698
5699 spin_lock_irq(&np->lock);
5700 id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
5701 spin_unlock_irq(&np->lock);
5702 if (id1 < 0 || id1 == 0xffff)
5703 continue;
5704 spin_lock_irq(&np->lock);
5705 id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
5706 spin_unlock_irq(&np->lock);
5707 if (id2 < 0 || id2 == 0xffff)
5708 continue;
5709
5710 np->phy_model = id2 & PHYID2_MODEL_MASK;
5711 id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
5712 id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
5713 np->phyaddr = phyaddr;
5714 np->phy_oui = id1 | id2;
5715
5716 /* Realtek hardcoded phy id1 to all zeros on certain phys */
5717 if (np->phy_oui == PHY_OUI_REALTEK2)
5718 np->phy_oui = PHY_OUI_REALTEK;
5719 /* Setup phy revision for Realtek */
5720 if (np->phy_oui == PHY_OUI_REALTEK && np->phy_model == PHY_MODEL_REALTEK_8211)
5721 np->phy_rev = mii_rw(dev, phyaddr, MII_RESV1, MII_READ) & PHY_REV_MASK;
5722
5723 break;
5724 }
5725 if (i == 33) {
5726 dev_info(&pci_dev->dev, "Could not find a valid PHY\n");
5727 goto out_error;
5728 }
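/*
 * Editor's note on the probe loop above: i runs from 1 to 32 and is
 * masked with 0x1F, so the 32 possible MII addresses are tried in the
 * order 1, 2, ..., 31, 0. Address 0 is deliberately probed last,
 * presumably because some phys treat it as a broadcast address. A
 * minimal sketch of the iteration order (illustration only):
 */
#if 0
	for (i = 1; i <= 32; i++)
		pr_debug("probing MII address %d\n", i & 0x1F);	/* 1..31, then 0 */
#endif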
5729
5730 if (!phyinitialized) {
5731 /* reset it */
5732 phy_init(dev);
5733 } else {
5734 /* see if it is a gigabit phy */
5735 u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
5736 if (mii_status & PHY_GIGABIT)
5737 np->gigabit = PHY_GIGABIT;
5738 }
5739
5740 /* set default link speed settings */
5741 np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
5742 np->duplex = 0;
5743 np->autoneg = 1;
5744
5745 err = register_netdev(dev);
5746 if (err) {
5747 dev_info(&pci_dev->dev, "unable to register netdev: %d\n", err);
5748 goto out_error;
5749 }
5750
5751 if (id->driver_data & DEV_HAS_VLAN)
5752 nv_vlan_mode(dev, dev->features);
5753
5754 netif_carrier_off(dev);
5755
5756 dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
5757 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
5758
5759 dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
5760 dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
5761 dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
5762 "csum " : "",
5763 dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
5764 "vlan " : "",
5765 dev->features & (NETIF_F_LOOPBACK) ?
5766 "loopback " : "",
5767 id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
5768 id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
5769 id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
5770 np->gigabit == PHY_GIGABIT ? "gbit " : "",
5771 np->need_linktimer ? "lnktim " : "",
5772 np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
5773 np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
5774 np->desc_ver);
5775
5776 return 0;
5777
5778 out_error:
5779 if (phystate_orig)
5780 writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
5781 pci_set_drvdata(pci_dev, NULL);
5782 out_freering:
5783 free_rings(dev);
5784 out_unmap:
5785 iounmap(get_hwbase(dev));
5786 out_relreg:
5787 pci_release_regions(pci_dev);
5788 out_disable:
5789 pci_disable_device(pci_dev);
5790 out_free:
5791 free_netdev(dev);
5792 out:
5793 return err;
5794 }
5795
5796 static void nv_restore_phy(struct net_device *dev)
5797 {
5798 struct fe_priv *np = netdev_priv(dev);
5799 u16 phy_reserved, mii_control;
5800
5801 if (np->phy_oui == PHY_OUI_REALTEK &&
5802 np->phy_model == PHY_MODEL_REALTEK_8201 &&
5803 phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
5804 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3);
5805 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
5806 phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
5807 phy_reserved |= PHY_REALTEK_INIT8;
5808 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved);
5809 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1);
5810
5811 /* restart auto negotiation */
5812 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
5813 mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
5814 mii_rw(dev, np->phyaddr, MII_BMCR, mii_control);
5815 }
5816 }
5817
5818 static void nv_restore_mac_addr(struct pci_dev *pci_dev)
5819 {
5820 struct net_device *dev = pci_get_drvdata(pci_dev);
5821 struct fe_priv *np = netdev_priv(dev);
5822 u8 __iomem *base = get_hwbase(dev);
5823
5824 /* special op: write back the misordered MAC address - otherwise
5825 * the next nv_probe would see a wrong address.
5826 */
5827 writel(np->orig_mac[0], base + NvRegMacAddrA);
5828 writel(np->orig_mac[1], base + NvRegMacAddrB);
5829 writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV,
5830 base + NvRegTransmitPoll);
5831 }
5832
5833 static void __devexit nv_remove(struct pci_dev *pci_dev)
5834 {
5835 struct net_device *dev = pci_get_drvdata(pci_dev);
5836
5837 unregister_netdev(dev);
5838
5839 nv_restore_mac_addr(pci_dev);
5840
5841 /* restore any phy related changes */
5842 nv_restore_phy(dev);
5843
5844 nv_mgmt_release_sema(dev);
5845
5846 /* free all structures */
5847 free_rings(dev);
5848 iounmap(get_hwbase(dev));
5849 pci_release_regions(pci_dev);
5850 pci_disable_device(pci_dev);
5851 free_netdev(dev);
5852 pci_set_drvdata(pci_dev, NULL);
5853 }
5854
5855 #ifdef CONFIG_PM_SLEEP
5856 static int nv_suspend(struct device *device)
5857 {
5858 struct pci_dev *pdev = to_pci_dev(device);
5859 struct net_device *dev = pci_get_drvdata(pdev);
5860 struct fe_priv *np = netdev_priv(dev);
5861 u8 __iomem *base = get_hwbase(dev);
5862 int i;
5863
5864 if (netif_running(dev)) {
5865 /* Gross. */
5866 nv_close(dev);
5867 }
5868 netif_device_detach(dev);
5869
5870 /* save non-pci configuration space */
5871 for (i = 0; i <= np->register_size/sizeof(u32); i++)
5872 np->saved_config_space[i] = readl(base + i*sizeof(u32));
5873
5874 return 0;
5875 }
5876
5877 static int nv_resume(struct device *device)
5878 {
5879 struct pci_dev *pdev = to_pci_dev(device);
5880 struct net_device *dev = pci_get_drvdata(pdev);
5881 struct fe_priv *np = netdev_priv(dev);
5882 u8 __iomem *base = get_hwbase(dev);
5883 int i, rc = 0;
5884
5885 /* restore non-pci configuration space */
5886 for (i = 0; i <= np->register_size/sizeof(u32); i++)
5887 writel(np->saved_config_space[i], base+i*sizeof(u32));
5888
5889 if (np->driver_data & DEV_NEED_MSI_FIX)
5890 pci_write_config_dword(pdev, NV_MSI_PRIV_OFFSET, NV_MSI_PRIV_VALUE);
5891
5892 /* restore phy state, including autoneg */
5893 phy_init(dev);
5894
5895 netif_device_attach(dev);
5896 if (netif_running(dev)) {
5897 rc = nv_open(dev);
5898 nv_set_multicast(dev);
5899 }
5900 return rc;
5901 }
5902
5903 static SIMPLE_DEV_PM_OPS(nv_pm_ops, nv_suspend, nv_resume);
5904 #define NV_PM_OPS (&nv_pm_ops)
5905
5906 #else
5907 #define NV_PM_OPS NULL
5908 #endif /* CONFIG_PM_SLEEP */
5909
5910 #ifdef CONFIG_PM
5911 static void nv_shutdown(struct pci_dev *pdev)
5912 {
5913 struct net_device *dev = pci_get_drvdata(pdev);
5914 struct fe_priv *np = netdev_priv(dev);
5915
5916 if (netif_running(dev))
5917 nv_close(dev);
5918
5919 /*
5920 * Restore the MAC so a kernel started by kexec won't get confused.
5921 * If we really go for poweroff, we must not restore the MAC,
5922 * otherwise the MAC for WOL will be reversed at least on some boards.
5923 */
5924 if (system_state != SYSTEM_POWER_OFF)
5925 nv_restore_mac_addr(pdev);
5926
5927 pci_disable_device(pdev);
5928 /*
5929 * Apparently it is not possible to reinitialise from D3 hot,
5930 * so only put the device into D3 if we really go for poweroff.
5931 */
5932 if (system_state == SYSTEM_POWER_OFF) {
5933 pci_wake_from_d3(pdev, np->wolenabled);
5934 pci_set_power_state(pdev, PCI_D3hot);
5935 }
5936 }
5937 #else
5938 #define nv_shutdown NULL
5939 #endif /* CONFIG_PM */
5940
5941 static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = {
5942 { /* nForce Ethernet Controller */
5943 PCI_DEVICE(0x10DE, 0x01C3),
5944 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
5945 },
5946 { /* nForce2 Ethernet Controller */
5947 PCI_DEVICE(0x10DE, 0x0066),
5948 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
5949 },
5950 { /* nForce3 Ethernet Controller */
5951 PCI_DEVICE(0x10DE, 0x00D6),
5952 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
5953 },
5954 { /* nForce3 Ethernet Controller */
5955 PCI_DEVICE(0x10DE, 0x0086),
5956 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
5957 },
5958 { /* nForce3 Ethernet Controller */
5959 PCI_DEVICE(0x10DE, 0x008C),
5960 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
5961 },
5962 { /* nForce3 Ethernet Controller */
5963 PCI_DEVICE(0x10DE, 0x00E6),
5964 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
5965 },
5966 { /* nForce3 Ethernet Controller */
5967 PCI_DEVICE(0x10DE, 0x00DF),
5968 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
5969 },
5970 { /* CK804 Ethernet Controller */
5971 PCI_DEVICE(0x10DE, 0x0056),
5972 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
5973 },
5974 { /* CK804 Ethernet Controller */
5975 PCI_DEVICE(0x10DE, 0x0057),
5976 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
5977 },
5978 { /* MCP04 Ethernet Controller */
5979 PCI_DEVICE(0x10DE, 0x0037),
5980 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
5981 },
5982 { /* MCP04 Ethernet Controller */
5983 PCI_DEVICE(0x10DE, 0x0038),
5984 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
5985 },
5986 { /* MCP51 Ethernet Controller */
5987 PCI_DEVICE(0x10DE, 0x0268),
5988 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX,
5989 },
5990 { /* MCP51 Ethernet Controller */
5991 PCI_DEVICE(0x10DE, 0x0269),
5992 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX,
5993 },
5994 { /* MCP55 Ethernet Controller */
5995 PCI_DEVICE(0x10DE, 0x0372),
5996 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
5997 },
5998 { /* MCP55 Ethernet Controller */
5999 PCI_DEVICE(0x10DE, 0x0373),
6000 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
6001 },
6002 { /* MCP61 Ethernet Controller */
6003 PCI_DEVICE(0x10DE, 0x03E5),
6004 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
6005 },
6006 { /* MCP61 Ethernet Controller */
6007 PCI_DEVICE(0x10DE, 0x03E6),
6008 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
6009 },
6010 { /* MCP61 Ethernet Controller */
6011 PCI_DEVICE(0x10DE, 0x03EE),
6012 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
6013 },
6014 { /* MCP61 Ethernet Controller */
6015 PCI_DEVICE(0x10DE, 0x03EF),
6016 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
6017 },
6018 { /* MCP65 Ethernet Controller */
6019 PCI_DEVICE(0x10DE, 0x0450),
6020 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6021 },
6022 { /* MCP65 Ethernet Controller */
6023 PCI_DEVICE(0x10DE, 0x0451),
6024 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6025 },
6026 { /* MCP65 Ethernet Controller */
6027 PCI_DEVICE(0x10DE, 0x0452),
6028 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6029 },
6030 { /* MCP65 Ethernet Controller */
6031 PCI_DEVICE(0x10DE, 0x0453),
6032 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6033 },
6034 { /* MCP67 Ethernet Controller */
6035 PCI_DEVICE(0x10DE, 0x054C),
6036 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6037 },
6038 { /* MCP67 Ethernet Controller */
6039 PCI_DEVICE(0x10DE, 0x054D),
6040 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6041 },
6042 { /* MCP67 Ethernet Controller */
6043 PCI_DEVICE(0x10DE, 0x054E),
6044 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6045 },
6046 { /* MCP67 Ethernet Controller */
6047 PCI_DEVICE(0x10DE, 0x054F),
6048 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6049 },
6050 { /* MCP73 Ethernet Controller */
6051 PCI_DEVICE(0x10DE, 0x07DC),
6052 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6053 },
6054 { /* MCP73 Ethernet Controller */
6055 PCI_DEVICE(0x10DE, 0x07DD),
6056 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6057 },
6058 { /* MCP73 Ethernet Controller */
6059 PCI_DEVICE(0x10DE, 0x07DE),
6060 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6061 },
6062 { /* MCP73 Ethernet Controller */
6063 PCI_DEVICE(0x10DE, 0x07DF),
6064 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
6065 },
6066 { /* MCP77 Ethernet Controller */
6067 PCI_DEVICE(0x10DE, 0x0760),
6068 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6069 },
6070 { /* MCP77 Ethernet Controller */
6071 PCI_DEVICE(0x10DE, 0x0761),
6072 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6073 },
6074 { /* MCP77 Ethernet Controller */
6075 PCI_DEVICE(0x10DE, 0x0762),
6076 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6077 },
6078 { /* MCP77 Ethernet Controller */
6079 PCI_DEVICE(0x10DE, 0x0763),
6080 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6081 },
6082 { /* MCP79 Ethernet Controller */
6083 PCI_DEVICE(0x10DE, 0x0AB0),
6084 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6085 },
6086 { /* MCP79 Ethernet Controller */
6087 PCI_DEVICE(0x10DE, 0x0AB1),
6088 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6089 },
6090 { /* MCP79 Ethernet Controller */
6091 PCI_DEVICE(0x10DE, 0x0AB2),
6092 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6093 },
6094 { /* MCP79 Ethernet Controller */
6095 PCI_DEVICE(0x10DE, 0x0AB3),
6096 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
6097 },
6098 { /* MCP89 Ethernet Controller */
6099 PCI_DEVICE(0x10DE, 0x0D7D),
6100 .driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX,
6101 },
6102 {0,},
6103 };
6104
6105 static struct pci_driver driver = {
6106 .name = DRV_NAME,
6107 .id_table = pci_tbl,
6108 .probe = nv_probe,
6109 .remove = __devexit_p(nv_remove),
6110 .shutdown = nv_shutdown,
6111 .driver.pm = NV_PM_OPS,
6112 };
6113
6114 static int __init init_nic(void)
6115 {
6116 return pci_register_driver(&driver);
6117 }
6118
6119 static void __exit exit_nic(void)
6120 {
6121 pci_unregister_driver(&driver);
6122 }
6123
6124 module_param(max_interrupt_work, int, 0);
6125 MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
6126 module_param(optimization_mode, int, 0);
6127 MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer. In dynamic mode (2), the mode toggles between throughput and CPU mode based on network load.");
6128 module_param(poll_interval, int, 0);
6129 MODULE_PARM_DESC(poll_interval, "Interval determines how frequently the timer interrupt is generated, as [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and max is 65535.");
6130 module_param(msi, int, 0);
6131 MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
6132 module_param(msix, int, 0);
6133 MODULE_PARM_DESC(msix, "MSI-X interrupts are enabled by setting to 1 and disabled by setting to 0.");
6134 module_param(dma_64bit, int, 0);
6135 MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
6136 module_param(phy_cross, int, 0);
6137 MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0.");
6138 module_param(phy_power_down, int, 0);
6139 MODULE_PARM_DESC(phy_power_down, "Power down phy and disable link when interface is down (1), or leave phy powered up (0).");
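/*
 * Editor's worked example for poll_interval: the register value is
 * (time_in_micro_secs * 100) / (2^10), so a 10000 us (100 Hz) period
 * maps to (10000 * 100) / 1024 ~= 976 register units; nv_open clamps
 * the value with "& 0xFFFF" before writing it. A hypothetical helper
 * (illustration only, not part of the driver):
 */
#if 0
static inline u32 nv_usec_to_poll_units(u32 usec)
{
	return ((usec * 100) >> 10) & 0xFFFF;	/* (us * 100) / 2^10, 16-bit register */
}
/* nv_usec_to_poll_units(10000) == 976, i.e. roughly 100 timer irqs/sec */
#endif
/*
 * Typical module usage, e.g. dynamic interrupt moderation with MSI
 * enabled (parameter names as declared above):
 *
 *   modprobe forcedeth optimization_mode=2 msi=1
 */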
6140
6141 MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
6142 MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
6143 MODULE_LICENSE("GPL");
6144
6145 MODULE_DEVICE_TABLE(pci, pci_tbl);
6146
6147 module_init(init_nic);
6148 module_exit(exit_nic);