/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)	\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)	\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
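/* Usage note (editor's illustration, not in the original file): these
 * token-pasting wrappers let callers name flags by their short suffix,
 * e.g.
 *
 *	if (tg3_flag(tp, JUMBO_CAPABLE))
 *		tg3_flag_set(tp, TSO_CAPABLE);
 *
 * expands to test_bit(TG3_FLAG_JUMBO_CAPABLE, (tp)->tg3_flags), so the
 * flag words stay an ordinary unsigned long bitmap.
 */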
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		122
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"December 7, 2011"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
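/* Worked example (editor's note, not in the original file): with
 * TG3_TX_RING_SIZE = 512 the mask is 0x1ff, so NEXT_TX(510) == 511 and
 * NEXT_TX(511) == 0. The AND with (size - 1) is exactly the
 * '& (foo - 1)' replacement for '% foo' the comment above describes,
 * valid because the ring size is a power of two.
 */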
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode. The driver
 * works around this bug by double copying the packet. This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient. For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path. Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif
#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
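/* Editor's usage sketch (not in the original file): tg3_debug is the
 * standard NETIF_MSG_* bitmap, so something like
 *
 *	modprobe tg3 tg3_debug=0x7
 *
 * would enable the DRV, PROBE and LINK message classes, assuming the
 * usual NETIF_MSG_* bit values from linux/netdevice.h.
 */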
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test (online) " },
	{ "link test (online) " },
	{ "register test (offline)" },
	{ "memory test (offline)" },
	{ "mac loopback test (offline)" },
	{ "phy loopback test (offline)" },
	{ "ext loopback test (offline)" },
	{ "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
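/* Editor's note (not in the original file): in the posted case the
 * read-back of the same register forces the preceding PCI write to
 * actually reach the chip before the second udelay() runs, so the
 * caller's requested settle time is counted from the write landing,
 * not merely from it being queued.
 */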
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
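/* Editor's usage note (not in the original file): these macros assume a
 * variable named 'tp' in the calling scope. tw32_f() is a flushed
 * write, and tw32_wait_f() additionally enforces a settle time, e.g.
 *
 *	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 *
 * as tg3_switch_clocks() below does for a 40 usec clock-change delay.
 */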
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return 0;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
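/* Editor's note (not in the original file): the request/grant scheme
 * above is a simple hardware arbiter -- each contender sets its own bit
 * in the REQ register and owns the lock only once that same bit shows
 * up in the GRANT register; 100 polls of udelay(10) give the 1 ms bound
 * the comment describes. Writing the bit back to GRANT releases (or
 * revokes) the claim.
 */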
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* NCSI does not support APE events */
	if (tg3_flag(tp, APE_HAS_NCSI))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state. Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
#define PHY_BUSY_LOOPS	5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
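/* Editor's note (not in the original file): both accessors above build
 * an MI_COM management frame by packing the PHY address and register
 * number into their shifted fields, setting MI_COM_START, and then
 * polling MI_COM_BUSY at 10 usec intervals (up to PHY_BUSY_LOOPS times)
 * until the serial MII transaction completes.
 */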
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB);
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
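/* Editor's note (not in the original file): the reset poll above allows
 * roughly 50 ms (5000 iterations of udelay(10), plus the register read
 * time) for BMCR_RESET to self-clear before giving up with -EBUSY.
 */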
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers. A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
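/* Editor's note (not in the original file): the arithmetic above turns
 * the remaining time into a poll count -- (delay_cnt >> 3) + 1 loops of
 * udelay(8) cover approximately delay_cnt microseconds, so the driver
 * never waits much longer than the 2.5 ms firmware event timeout.
 */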
/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}
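/* Editor's note (not in the original file): the four data words sent to
 * the firmware mailbox pack MII register pairs -- BMCR/BMSR,
 * ADVERTISE/LPA, CTRL1000/STAT1000 and PHYADDR -- each as
 * (high << 16) | low. Gathering the slow PHY reads into data[] before
 * tg3_wait_for_event_ack() shortens the span between the ack check and
 * the mailbox writes, which is the UMP event collision window the
 * commit this blame view was taken from set out to reduce.
 */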
/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware. Some Sun onboard
	 * parts are configured like that. So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
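/* Editor's note (not in the original file): the handshake polled above
 * works by the driver writing NIC_SRAM_FIRMWARE_MBOX_MAGIC1 before a
 * reset (see tg3_write_sig_pre_reset()) and the bootcode writing back
 * the bitwise complement once its initialization is done; parts fitted
 * without firmware simply never answer, which is why the timeout is
 * reported once but not treated as fatal.
 */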
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
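/* Editor's note (not in the original file): this follows the usual
 * 802.3x/clause-37 pause resolution -- if both ends advertise symmetric
 * pause, flow control runs in both directions; otherwise, with both
 * ends advertising asymmetric pause, the side that also advertises
 * symmetric pause resolves to honoring received pause frames
 * (FLOW_CTRL_RX here) while its link partner resolves to sending them
 * (FLOW_CTRL_TX).
 */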
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
b02fd9e3
MC
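/* phylib link-change callback: mirror the PHY's current speed,
 * duplex and pause state into the MAC mode registers and log
 * the result if anything changed.
 */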
1754static void tg3_adjust_link(struct net_device *dev)
1755{
1756 u8 oldflowctrl, linkmesg = 0;
1757 u32 mac_mode, lcl_adv, rmt_adv;
1758 struct tg3 *tp = netdev_priv(dev);
3f0e3ad7 1759 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
b02fd9e3 1760
24bb4fb6 1761 spin_lock_bh(&tp->lock);
b02fd9e3
MC
1762
1763 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1764 MAC_MODE_HALF_DUPLEX);
1765
1766 oldflowctrl = tp->link_config.active_flowctrl;
1767
1768 if (phydev->link) {
1769 lcl_adv = 0;
1770 rmt_adv = 0;
1771
1772 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1773 mac_mode |= MAC_MODE_PORT_MODE_MII;
c3df0748
MC
1774 else if (phydev->speed == SPEED_1000 ||
1775 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
b02fd9e3 1776 mac_mode |= MAC_MODE_PORT_MODE_GMII;
c3df0748
MC
1777 else
1778 mac_mode |= MAC_MODE_PORT_MODE_MII;
b02fd9e3
MC
1779
1780 if (phydev->duplex == DUPLEX_HALF)
1781 mac_mode |= MAC_MODE_HALF_DUPLEX;
1782 else {
f88788f0 1783 lcl_adv = mii_advertise_flowctrl(
b02fd9e3
MC
1784 tp->link_config.flowctrl);
1785
1786 if (phydev->pause)
1787 rmt_adv = LPA_PAUSE_CAP;
1788 if (phydev->asym_pause)
1789 rmt_adv |= LPA_PAUSE_ASYM;
1790 }
1791
1792 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1793 } else
1794 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1795
1796 if (mac_mode != tp->mac_mode) {
1797 tp->mac_mode = mac_mode;
1798 tw32_f(MAC_MODE, tp->mac_mode);
1799 udelay(40);
1800 }
1801
fcb389df
MC
1802 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1803 if (phydev->speed == SPEED_10)
1804 tw32(MAC_MI_STAT,
1805 MAC_MI_STAT_10MBPS_MODE |
1806 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1807 else
1808 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1809 }
1810
b02fd9e3
MC
1811 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1812 tw32(MAC_TX_LENGTHS,
1813 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1814 (6 << TX_LENGTHS_IPG_SHIFT) |
1815 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1816 else
1817 tw32(MAC_TX_LENGTHS,
1818 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1819 (6 << TX_LENGTHS_IPG_SHIFT) |
1820 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1821
1822 if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1823 (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1824 phydev->speed != tp->link_config.active_speed ||
1825 phydev->duplex != tp->link_config.active_duplex ||
1826 oldflowctrl != tp->link_config.active_flowctrl)
c6cdf436 1827 linkmesg = 1;
b02fd9e3
MC
1828
1829 tp->link_config.active_speed = phydev->speed;
1830 tp->link_config.active_duplex = phydev->duplex;
1831
24bb4fb6 1832 spin_unlock_bh(&tp->lock);
b02fd9e3
MC
1833
1834 if (linkmesg)
1835 tg3_link_report(tp);
1836}
1837
1838static int tg3_phy_init(struct tg3 *tp)
1839{
1840 struct phy_device *phydev;
1841
f07e9af3 1842 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
b02fd9e3
MC
1843 return 0;
1844
1845 /* Bring the PHY back to a known state. */
1846 tg3_bmcr_reset(tp);
1847
3f0e3ad7 1848 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
b02fd9e3
MC
1849
1850 /* Attach the MAC to the PHY. */
fb28ad35 1851 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
a9daf367 1852 phydev->dev_flags, phydev->interface);
b02fd9e3 1853 if (IS_ERR(phydev)) {
ab96b241 1854 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
b02fd9e3
MC
1855 return PTR_ERR(phydev);
1856 }
1857
b02fd9e3 1858 /* Mask with MAC supported features. */
9c61d6bc
MC
1859 switch (phydev->interface) {
1860 case PHY_INTERFACE_MODE_GMII:
1861 case PHY_INTERFACE_MODE_RGMII:
f07e9af3 1862 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
321d32a0
MC
1863 phydev->supported &= (PHY_GBIT_FEATURES |
1864 SUPPORTED_Pause |
1865 SUPPORTED_Asym_Pause);
1866 break;
1867 }
1868 /* fallthru */
9c61d6bc
MC
1869 case PHY_INTERFACE_MODE_MII:
1870 phydev->supported &= (PHY_BASIC_FEATURES |
1871 SUPPORTED_Pause |
1872 SUPPORTED_Asym_Pause);
1873 break;
1874 default:
3f0e3ad7 1875 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
9c61d6bc
MC
1876 return -EINVAL;
1877 }
1878
f07e9af3 1879 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
b02fd9e3
MC
1880
1881 phydev->advertising = phydev->supported;
1882
b02fd9e3
MC
1883 return 0;
1884}
1885
1886static void tg3_phy_start(struct tg3 *tp)
1887{
1888 struct phy_device *phydev;
1889
f07e9af3 1890 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
b02fd9e3
MC
1891 return;
1892
3f0e3ad7 1893 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
b02fd9e3 1894
80096068
MC
1895 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1896 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
b02fd9e3
MC
1897 phydev->speed = tp->link_config.orig_speed;
1898 phydev->duplex = tp->link_config.orig_duplex;
1899 phydev->autoneg = tp->link_config.orig_autoneg;
1900 phydev->advertising = tp->link_config.orig_advertising;
1901 }
1902
1903 phy_start(phydev);
1904
1905 phy_start_aneg(phydev);
1906}
1907
1908static void tg3_phy_stop(struct tg3 *tp)
1909{
f07e9af3 1910 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
b02fd9e3
MC
1911 return;
1912
3f0e3ad7 1913 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
b02fd9e3
MC
1914}
1915
1916static void tg3_phy_fini(struct tg3 *tp)
1917{
f07e9af3 1918 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
3f0e3ad7 1919 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
f07e9af3 1920 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
b02fd9e3
MC
1921 }
1922}
1923
941ec90f
MC
1924static int tg3_phy_set_extloopbk(struct tg3 *tp)
1925{
1926 int err;
1927 u32 val;
1928
1929 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
1930 return 0;
1931
1932 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1933 /* Cannot do read-modify-write on 5401 */
1934 err = tg3_phy_auxctl_write(tp,
1935 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1936 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
1937 0x4c20);
1938 goto done;
1939 }
1940
1941 err = tg3_phy_auxctl_read(tp,
1942 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1943 if (err)
1944 return err;
1945
1946 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
1947 err = tg3_phy_auxctl_write(tp,
1948 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
1949
1950done:
1951 return err;
1952}
1953
7f97a4bd
MC
1954static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1955{
1956 u32 phytest;
1957
1958 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1959 u32 phy;
1960
1961 tg3_writephy(tp, MII_TG3_FET_TEST,
1962 phytest | MII_TG3_FET_SHADOW_EN);
1963 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1964 if (enable)
1965 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1966 else
1967 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1968 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1969 }
1970 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1971 }
1972}
1973
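/* Enable or disable the PHY's auto power-down (APD) feature by
 * programming the shadowed SCR5 and APD wake timer registers.
 */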
6833c043
MC
1974static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1975{
1976 u32 reg;
1977
63c3a66f
JP
1978 if (!tg3_flag(tp, 5705_PLUS) ||
1979 (tg3_flag(tp, 5717_PLUS) &&
f07e9af3 1980 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
6833c043
MC
1981 return;
1982
f07e9af3 1983 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7f97a4bd
MC
1984 tg3_phy_fet_toggle_apd(tp, enable);
1985 return;
1986 }
1987
6833c043
MC
1988 reg = MII_TG3_MISC_SHDW_WREN |
1989 MII_TG3_MISC_SHDW_SCR5_SEL |
1990 MII_TG3_MISC_SHDW_SCR5_LPED |
1991 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1992 MII_TG3_MISC_SHDW_SCR5_SDTL |
1993 MII_TG3_MISC_SHDW_SCR5_C125OE;
1994 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1995 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1996
1997 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1998
1999
2000 reg = MII_TG3_MISC_SHDW_WREN |
2001 MII_TG3_MISC_SHDW_APD_SEL |
2002 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2003 if (enable)
2004 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2005
2006 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2007}
2008
9ef8ca99
MC
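/* Enable or disable automatic MDI/MDI-X crossover. FET PHYs
 * use a shadowed MISCCTRL register; other PHYs use the AUXCTL
 * misc shadow register.
 */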
2009static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2010{
2011 u32 phy;
2012
63c3a66f 2013 if (!tg3_flag(tp, 5705_PLUS) ||
f07e9af3 2014 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
9ef8ca99
MC
2015 return;
2016
f07e9af3 2017 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
9ef8ca99
MC
2018 u32 ephy;
2019
535ef6e1
MC
2020 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2021 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2022
2023 tg3_writephy(tp, MII_TG3_FET_TEST,
2024 ephy | MII_TG3_FET_SHADOW_EN);
2025 if (!tg3_readphy(tp, reg, &phy)) {
9ef8ca99 2026 if (enable)
535ef6e1 2027 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
9ef8ca99 2028 else
535ef6e1
MC
2029 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2030 tg3_writephy(tp, reg, phy);
9ef8ca99 2031 }
535ef6e1 2032 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
9ef8ca99
MC
2033 }
2034 } else {
15ee95c3
MC
2035 int ret;
2036
2037 ret = tg3_phy_auxctl_read(tp,
2038 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2039 if (!ret) {
9ef8ca99
MC
2040 if (enable)
2041 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2042 else
2043 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
b4bd2929
MC
2044 tg3_phy_auxctl_write(tp,
2045 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
9ef8ca99
MC
2046 }
2047 }
2048}
2049
1da177e4
LT
2050static void tg3_phy_set_wirespeed(struct tg3 *tp)
2051{
15ee95c3 2052 int ret;
1da177e4
LT
2053 u32 val;
2054
f07e9af3 2055 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
1da177e4
LT
2056 return;
2057
15ee95c3
MC
2058 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2059 if (!ret)
b4bd2929
MC
2060 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2061 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
1da177e4
LT
2062}
2063
b2a5c19c
MC
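/* Copy factory calibration values from the chip's one-time
 * programmable (OTP) memory into the PHY DSP registers.
 */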
2064static void tg3_phy_apply_otp(struct tg3 *tp)
2065{
2066 u32 otp, phy;
2067
2068 if (!tp->phy_otp)
2069 return;
2070
2071 otp = tp->phy_otp;
2072
1d36ba45
MC
2073 if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2074 return;
b2a5c19c
MC
2075
2076 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2077 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2078 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2079
2080 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2081 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2082 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2083
2084 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2085 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2086 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2087
2088 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2089 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2090
2091 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2092 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2093
2094 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2095 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2096 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2097
1d36ba45 2098 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
b2a5c19c
MC
2099}
2100
52b02d04
MC
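/* Re-evaluate Energy Efficient Ethernet (EEE) after a link
 * change: program the LPI exit timer and arm setlpicnt when
 * the link partner negotiated EEE at 100 or 1000 full duplex,
 * otherwise make sure low-power idle stays disabled.
 */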
2101static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2102{
2103 u32 val;
2104
2105 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2106 return;
2107
2108 tp->setlpicnt = 0;
2109
2110 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2111 current_link_up == 1 &&
a6b68dab
MC
2112 tp->link_config.active_duplex == DUPLEX_FULL &&
2113 (tp->link_config.active_speed == SPEED_100 ||
2114 tp->link_config.active_speed == SPEED_1000)) {
52b02d04
MC
2115 u32 eeectl;
2116
2117 if (tp->link_config.active_speed == SPEED_1000)
2118 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2119 else
2120 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2121
2122 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2123
3110f5f5
MC
2124 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2125 TG3_CL45_D7_EEERES_STAT, &val);
52b02d04 2126
b0c5943f
MC
2127 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2128 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
52b02d04
MC
2129 tp->setlpicnt = 2;
2130 }
2131
2132 if (!tp->setlpicnt) {
b715ce94
MC
2133 if (current_link_up == 1 &&
2134 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2135 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2136 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2137 }
2138
52b02d04
MC
2139 val = tr32(TG3_CPMU_EEE_MODE);
2140 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2141 }
2142}
2143
b0c5943f
MC
2144static void tg3_phy_eee_enable(struct tg3 *tp)
2145{
2146 u32 val;
2147
2148 if (tp->link_config.active_speed == SPEED_1000 &&
2149 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2150 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
55086ad9 2151 tg3_flag(tp, 57765_CLASS)) &&
b0c5943f 2152 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
b715ce94
MC
2153 val = MII_TG3_DSP_TAP26_ALNOKO |
2154 MII_TG3_DSP_TAP26_RMRXSTO;
2155 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
b0c5943f
MC
2156 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2157 }
2158
2159 val = tr32(TG3_CPMU_EEE_MODE);
2160 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2161}
2162
1da177e4
LT
2163static int tg3_wait_macro_done(struct tg3 *tp)
2164{
2165 int limit = 100;
2166
2167 while (limit--) {
2168 u32 tmp32;
2169
f08aa1a8 2170 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1da177e4
LT
2171 if ((tmp32 & 0x1000) == 0)
2172 break;
2173 }
2174 }
d4675b52 2175 if (limit < 0)
1da177e4
LT
2176 return -EBUSY;
2177
2178 return 0;
2179}
2180
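/* Write a known test pattern to each of the four DSP channels
 * and read it back. A macro timeout sets *resetp to request
 * another PHY reset; a readback mismatch fails with -EBUSY.
 */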
2181static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2182{
2183 static const u32 test_pat[4][6] = {
2184 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2185 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2186 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2187 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2188 };
2189 int chan;
2190
2191 for (chan = 0; chan < 4; chan++) {
2192 int i;
2193
2194 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2195 (chan * 0x2000) | 0x0200);
f08aa1a8 2196 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1da177e4
LT
2197
2198 for (i = 0; i < 6; i++)
2199 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2200 test_pat[chan][i]);
2201
f08aa1a8 2202 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1da177e4
LT
2203 if (tg3_wait_macro_done(tp)) {
2204 *resetp = 1;
2205 return -EBUSY;
2206 }
2207
2208 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2209 (chan * 0x2000) | 0x0200);
f08aa1a8 2210 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
1da177e4
LT
2211 if (tg3_wait_macro_done(tp)) {
2212 *resetp = 1;
2213 return -EBUSY;
2214 }
2215
f08aa1a8 2216 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
1da177e4
LT
2217 if (tg3_wait_macro_done(tp)) {
2218 *resetp = 1;
2219 return -EBUSY;
2220 }
2221
2222 for (i = 0; i < 6; i += 2) {
2223 u32 low, high;
2224
2225 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2226 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2227 tg3_wait_macro_done(tp)) {
2228 *resetp = 1;
2229 return -EBUSY;
2230 }
2231 low &= 0x7fff;
2232 high &= 0x000f;
2233 if (low != test_pat[chan][i] ||
2234 high != test_pat[chan][i+1]) {
2235 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2236 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2237 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2238
2239 return -EBUSY;
2240 }
2241 }
2242 }
2243
2244 return 0;
2245}
2246
2247static int tg3_phy_reset_chanpat(struct tg3 *tp)
2248{
2249 int chan;
2250
2251 for (chan = 0; chan < 4; chan++) {
2252 int i;
2253
2254 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2255 (chan * 0x2000) | 0x0200);
f08aa1a8 2256 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1da177e4
LT
2257 for (i = 0; i < 6; i++)
2258 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
f08aa1a8 2259 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1da177e4
LT
2260 if (tg3_wait_macro_done(tp))
2261 return -EBUSY;
2262 }
2263
2264 return 0;
2265}
2266
2267static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2268{
2269 u32 reg32, phy9_orig;
2270 int retries, do_phy_reset, err;
2271
2272 retries = 10;
2273 do_phy_reset = 1;
2274 do {
2275 if (do_phy_reset) {
2276 err = tg3_bmcr_reset(tp);
2277 if (err)
2278 return err;
2279 do_phy_reset = 0;
2280 }
2281
2282 /* Disable transmitter and interrupt. */
2283 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2284 continue;
2285
2286 reg32 |= 0x3000;
2287 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2288
2289	/* Set full-duplex, 1000 Mbps. */
2290 tg3_writephy(tp, MII_BMCR,
221c5637 2291 BMCR_FULLDPLX | BMCR_SPEED1000);
1da177e4
LT
2292
2293 /* Set to master mode. */
221c5637 2294 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
1da177e4
LT
2295 continue;
2296
221c5637
MC
2297 tg3_writephy(tp, MII_CTRL1000,
2298 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
1da177e4 2299
1d36ba45
MC
2300 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2301 if (err)
2302 return err;
1da177e4
LT
2303
2304 /* Block the PHY control access. */
6ee7c0a0 2305 tg3_phydsp_write(tp, 0x8005, 0x0800);
1da177e4
LT
2306
2307 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2308 if (!err)
2309 break;
2310 } while (--retries);
2311
2312 err = tg3_phy_reset_chanpat(tp);
2313 if (err)
2314 return err;
2315
6ee7c0a0 2316 tg3_phydsp_write(tp, 0x8005, 0x0000);
1da177e4
LT
2317
2318 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
f08aa1a8 2319 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
1da177e4 2320
1d36ba45 2321 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1da177e4 2322
221c5637 2323 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
1da177e4
LT
2324
2325 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2326 reg32 &= ~0x3000;
2327 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2328 } else if (!err)
2329 err = -EBUSY;
2330
2331 return err;
2332}
2333
2334/* Reset the tigon3 PHY unconditionally and reapply the
2335 * chip-specific workarounds that a reset would otherwise clear.
2336 */
2337static int tg3_phy_reset(struct tg3 *tp)
2338{
f833c4c1 2339 u32 val, cpmuctrl;
1da177e4
LT
2340 int err;
2341
60189ddf 2342 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
60189ddf
MC
2343 val = tr32(GRC_MISC_CFG);
2344 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2345 udelay(40);
2346 }
f833c4c1
MC
2347 err = tg3_readphy(tp, MII_BMSR, &val);
2348 err |= tg3_readphy(tp, MII_BMSR, &val);
1da177e4
LT
2349 if (err != 0)
2350 return -EBUSY;
2351
c8e1e82b
MC
2352 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2353 netif_carrier_off(tp->dev);
2354 tg3_link_report(tp);
2355 }
2356
1da177e4
LT
2357 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2358 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2359 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2360 err = tg3_phy_reset_5703_4_5(tp);
2361 if (err)
2362 return err;
2363 goto out;
2364 }
2365
b2a5c19c
MC
2366 cpmuctrl = 0;
2367 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2368 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2369 cpmuctrl = tr32(TG3_CPMU_CTRL);
2370 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2371 tw32(TG3_CPMU_CTRL,
2372 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2373 }
2374
1da177e4
LT
2375 err = tg3_bmcr_reset(tp);
2376 if (err)
2377 return err;
2378
b2a5c19c 2379 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
f833c4c1
MC
2380 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2381 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
b2a5c19c
MC
2382
2383 tw32(TG3_CPMU_CTRL, cpmuctrl);
2384 }
2385
bcb37f6c
MC
2386 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2387 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
ce057f01
MC
2388 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2389 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2390 CPMU_LSPD_1000MB_MACCLK_12_5) {
2391 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2392 udelay(40);
2393 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2394 }
2395 }
2396
63c3a66f 2397 if (tg3_flag(tp, 5717_PLUS) &&
f07e9af3 2398 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
ecf1410b
MC
2399 return 0;
2400
b2a5c19c
MC
2401 tg3_phy_apply_otp(tp);
2402
f07e9af3 2403 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
6833c043
MC
2404 tg3_phy_toggle_apd(tp, true);
2405 else
2406 tg3_phy_toggle_apd(tp, false);
2407
1da177e4 2408out:
1d36ba45
MC
2409 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2410 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
6ee7c0a0
MC
2411 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2412 tg3_phydsp_write(tp, 0x000a, 0x0323);
1d36ba45 2413 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1da177e4 2414 }
1d36ba45 2415
f07e9af3 2416 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
f08aa1a8
MC
2417 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2418 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
1da177e4 2419 }
1d36ba45 2420
f07e9af3 2421 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
1d36ba45
MC
2422 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2423 tg3_phydsp_write(tp, 0x000a, 0x310b);
2424 tg3_phydsp_write(tp, 0x201f, 0x9506);
2425 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2426 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2427 }
f07e9af3 2428 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
1d36ba45
MC
2429 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2430 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2431 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2432 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2433 tg3_writephy(tp, MII_TG3_TEST1,
2434 MII_TG3_TEST1_TRIM_EN | 0x4);
2435 } else
2436 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2437
2438 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2439 }
c424cb24 2440 }
1d36ba45 2441
1da177e4
LT
2442	/* Set Extended packet length bit (bit 14) on all chips
2443	 * that support jumbo frames. */
79eb6904 2444 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1da177e4 2445 /* Cannot do read-modify-write on 5401 */
b4bd2929 2446 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
63c3a66f 2447 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
1da177e4 2448 /* Set bit 14 with read-modify-write to preserve other bits */
15ee95c3
MC
2449 err = tg3_phy_auxctl_read(tp,
2450 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2451 if (!err)
b4bd2929
MC
2452 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2453 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
1da177e4
LT
2454 }
2455
2456 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2457 * jumbo frames transmission.
2458 */
63c3a66f 2459 if (tg3_flag(tp, JUMBO_CAPABLE)) {
f833c4c1 2460 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
c6cdf436 2461 tg3_writephy(tp, MII_TG3_EXT_CTRL,
f833c4c1 2462 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1da177e4
LT
2463 }
2464
715116a1 2465 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
715116a1 2466 /* adjust output voltage */
535ef6e1 2467 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
715116a1
MC
2468 }
2469
9ef8ca99 2470 tg3_phy_toggle_automdix(tp, 1);
1da177e4
LT
2471 tg3_phy_set_wirespeed(tp);
2472 return 0;
2473}
2474
3a1e19d3
MC
2475#define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2476#define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2477#define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2478 TG3_GPIO_MSG_NEED_VAUX)
2479#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2480 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2481 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2482 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2483 (TG3_GPIO_MSG_DRVR_PRES << 12))
2484
2485#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2486 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2487 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2488 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2489 (TG3_GPIO_MSG_NEED_VAUX << 12))
2490
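/* Each PCI function owns a 4-bit field in the shared GPIO
 * status word. Replace this function's field with newstat and
 * return the resulting status for all functions.
 */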
2491static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2492{
2493 u32 status, shift;
2494
2495 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2496 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2497 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2498 else
2499 status = tr32(TG3_CPMU_DRV_STATUS);
2500
2501 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2502 status &= ~(TG3_GPIO_MSG_MASK << shift);
2503 status |= (newstat << shift);
2504
2505 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2506 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2507 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2508 else
2509 tw32(TG3_CPMU_DRV_STATUS, status);
2510
2511 return status >> TG3_APE_GPIO_MSG_SHIFT;
2512}
2513
520b2756
MC
2514static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2515{
2516 if (!tg3_flag(tp, IS_NIC))
2517 return 0;
2518
3a1e19d3
MC
2519 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2520 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2521 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2522 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2523 return -EIO;
520b2756 2524
3a1e19d3
MC
2525 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2526
2527 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2528 TG3_GRC_LCLCTL_PWRSW_DELAY);
2529
2530 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2531 } else {
2532 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2533 TG3_GRC_LCLCTL_PWRSW_DELAY);
2534 }
6f5c8f83 2535
520b2756
MC
2536 return 0;
2537}
2538
2539static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2540{
2541 u32 grc_local_ctrl;
2542
2543 if (!tg3_flag(tp, IS_NIC) ||
2544 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2545 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2546 return;
2547
2548 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2549
2550 tw32_wait_f(GRC_LOCAL_CTRL,
2551 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2552 TG3_GRC_LCLCTL_PWRSW_DELAY);
2553
2554 tw32_wait_f(GRC_LOCAL_CTRL,
2555 grc_local_ctrl,
2556 TG3_GRC_LCLCTL_PWRSW_DELAY);
2557
2558 tw32_wait_f(GRC_LOCAL_CTRL,
2559 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2560 TG3_GRC_LCLCTL_PWRSW_DELAY);
2561}
2562
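/* Walk the board-specific GPIO sequence that moves the NIC
 * onto its auxiliary (Vaux) power source; 5700/5701, 5761 and
 * the remaining chips each need a different GPIO ordering.
 */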
2563static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2564{
2565 if (!tg3_flag(tp, IS_NIC))
2566 return;
2567
2568 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2569 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2570 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2571 (GRC_LCLCTRL_GPIO_OE0 |
2572 GRC_LCLCTRL_GPIO_OE1 |
2573 GRC_LCLCTRL_GPIO_OE2 |
2574 GRC_LCLCTRL_GPIO_OUTPUT0 |
2575 GRC_LCLCTRL_GPIO_OUTPUT1),
2576 TG3_GRC_LCLCTL_PWRSW_DELAY);
2577 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2578 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2579 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2580 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2581 GRC_LCLCTRL_GPIO_OE1 |
2582 GRC_LCLCTRL_GPIO_OE2 |
2583 GRC_LCLCTRL_GPIO_OUTPUT0 |
2584 GRC_LCLCTRL_GPIO_OUTPUT1 |
2585 tp->grc_local_ctrl;
2586 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2587 TG3_GRC_LCLCTL_PWRSW_DELAY);
2588
2589 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2590 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2591 TG3_GRC_LCLCTL_PWRSW_DELAY);
2592
2593 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2594 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2595 TG3_GRC_LCLCTL_PWRSW_DELAY);
2596 } else {
2597 u32 no_gpio2;
2598 u32 grc_local_ctrl = 0;
2599
2600		/* Workaround to prevent drawing excess current. */
2601 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2602 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2603 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2604 grc_local_ctrl,
2605 TG3_GRC_LCLCTL_PWRSW_DELAY);
2606 }
2607
2608 /* On 5753 and variants, GPIO2 cannot be used. */
2609 no_gpio2 = tp->nic_sram_data_cfg &
2610 NIC_SRAM_DATA_CFG_NO_GPIO2;
2611
2612 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2613 GRC_LCLCTRL_GPIO_OE1 |
2614 GRC_LCLCTRL_GPIO_OE2 |
2615 GRC_LCLCTRL_GPIO_OUTPUT1 |
2616 GRC_LCLCTRL_GPIO_OUTPUT2;
2617 if (no_gpio2) {
2618 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2619 GRC_LCLCTRL_GPIO_OUTPUT2);
2620 }
2621 tw32_wait_f(GRC_LOCAL_CTRL,
2622 tp->grc_local_ctrl | grc_local_ctrl,
2623 TG3_GRC_LCLCTL_PWRSW_DELAY);
2624
2625 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2626
2627 tw32_wait_f(GRC_LOCAL_CTRL,
2628 tp->grc_local_ctrl | grc_local_ctrl,
2629 TG3_GRC_LCLCTL_PWRSW_DELAY);
2630
2631 if (!no_gpio2) {
2632 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2633 tw32_wait_f(GRC_LOCAL_CTRL,
2634 tp->grc_local_ctrl | grc_local_ctrl,
2635 TG3_GRC_LCLCTL_PWRSW_DELAY);
2636 }
2637 }
3a1e19d3
MC
2638}
2639
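/* Publish this function's Vaux requirement in the shared
 * status word. If another function's driver is still present,
 * let it manage the power source; otherwise switch to Vaux if
 * any function needs it, or back to Vmain.
 */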
cd0d7228 2640static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
3a1e19d3
MC
2641{
2642 u32 msg = 0;
2643
2644 /* Serialize power state transitions */
2645 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2646 return;
2647
cd0d7228 2648 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
3a1e19d3
MC
2649 msg = TG3_GPIO_MSG_NEED_VAUX;
2650
2651 msg = tg3_set_function_status(tp, msg);
2652
2653 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2654 goto done;
6f5c8f83 2655
3a1e19d3
MC
2656 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2657 tg3_pwrsrc_switch_to_vaux(tp);
2658 else
2659 tg3_pwrsrc_die_with_vmain(tp);
2660
2661done:
6f5c8f83 2662 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
520b2756
MC
2663}
2664
cd0d7228 2665static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
1da177e4 2666{
683644b7 2667 bool need_vaux = false;
1da177e4 2668
334355aa 2669 /* The GPIOs do something completely different on 57765. */
55086ad9 2670 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
1da177e4
LT
2671 return;
2672
3a1e19d3
MC
2673 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2674 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2675 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
cd0d7228
MC
2676 tg3_frob_aux_power_5717(tp, include_wol ?
2677 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
3a1e19d3
MC
2678 return;
2679 }
2680
2681 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
8c2dc7e1
MC
2682 struct net_device *dev_peer;
2683
2684 dev_peer = pci_get_drvdata(tp->pdev_peer);
683644b7 2685
bc1c7567 2686 /* remove_one() may have been run on the peer. */
683644b7
MC
2687 if (dev_peer) {
2688 struct tg3 *tp_peer = netdev_priv(dev_peer);
2689
63c3a66f 2690 if (tg3_flag(tp_peer, INIT_COMPLETE))
683644b7
MC
2691 return;
2692
cd0d7228 2693 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
63c3a66f 2694 tg3_flag(tp_peer, ENABLE_ASF))
683644b7
MC
2695 need_vaux = true;
2696 }
1da177e4
LT
2697 }
2698
cd0d7228
MC
2699 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2700 tg3_flag(tp, ENABLE_ASF))
683644b7
MC
2701 need_vaux = true;
2702
520b2756
MC
2703 if (need_vaux)
2704 tg3_pwrsrc_switch_to_vaux(tp);
2705 else
2706 tg3_pwrsrc_die_with_vmain(tp);
1da177e4
LT
2707}
2708
e8f3f6ca
MC
2709static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2710{
2711 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2712 return 1;
79eb6904 2713 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
e8f3f6ca
MC
2714 if (speed != SPEED_10)
2715 return 1;
2716 } else if (speed == SPEED_10)
2717 return 1;
2718
2719 return 0;
2720}
2721
0a459aac 2722static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
15c3b696 2723{
ce057f01
MC
2724 u32 val;
2725
f07e9af3 2726 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
5129724a
MC
2727 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2728 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2729 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2730
2731 sg_dig_ctrl |=
2732 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2733 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2734 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2735 }
3f7045c1 2736 return;
5129724a 2737 }
3f7045c1 2738
60189ddf 2739 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
60189ddf
MC
2740 tg3_bmcr_reset(tp);
2741 val = tr32(GRC_MISC_CFG);
2742 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2743 udelay(40);
2744 return;
f07e9af3 2745 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
0e5f784c
MC
2746 u32 phytest;
2747 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2748 u32 phy;
2749
2750 tg3_writephy(tp, MII_ADVERTISE, 0);
2751 tg3_writephy(tp, MII_BMCR,
2752 BMCR_ANENABLE | BMCR_ANRESTART);
2753
2754 tg3_writephy(tp, MII_TG3_FET_TEST,
2755 phytest | MII_TG3_FET_SHADOW_EN);
2756 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2757 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2758 tg3_writephy(tp,
2759 MII_TG3_FET_SHDW_AUXMODE4,
2760 phy);
2761 }
2762 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2763 }
2764 return;
0a459aac 2765 } else if (do_low_power) {
715116a1
MC
2766 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2767 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
0a459aac 2768
b4bd2929
MC
2769 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2770 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2771 MII_TG3_AUXCTL_PCTL_VREG_11V;
2772 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
715116a1 2773 }
3f7045c1 2774
15c3b696
MC
2775 /* The PHY should not be powered down on some chips because
2776 * of bugs.
2777 */
2778 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2779 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2780 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
f07e9af3 2781 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
15c3b696 2782 return;
ce057f01 2783
bcb37f6c
MC
2784 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2785 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
ce057f01
MC
2786 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2787 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2788 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2789 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2790 }
2791
15c3b696
MC
2792 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2793}
2794
ffbcfed4
MC
2795/* tp->lock is held. */
2796static int tg3_nvram_lock(struct tg3 *tp)
2797{
63c3a66f 2798 if (tg3_flag(tp, NVRAM)) {
ffbcfed4
MC
2799 int i;
2800
2801 if (tp->nvram_lock_cnt == 0) {
2802 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2803 for (i = 0; i < 8000; i++) {
2804 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2805 break;
2806 udelay(20);
2807 }
2808 if (i == 8000) {
2809 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2810 return -ENODEV;
2811 }
2812 }
2813 tp->nvram_lock_cnt++;
2814 }
2815 return 0;
2816}
2817
2818/* tp->lock is held. */
2819static void tg3_nvram_unlock(struct tg3 *tp)
2820{
63c3a66f 2821 if (tg3_flag(tp, NVRAM)) {
ffbcfed4
MC
2822 if (tp->nvram_lock_cnt > 0)
2823 tp->nvram_lock_cnt--;
2824 if (tp->nvram_lock_cnt == 0)
2825 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2826 }
2827}
2828
2829/* tp->lock is held. */
2830static void tg3_enable_nvram_access(struct tg3 *tp)
2831{
63c3a66f 2832 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
ffbcfed4
MC
2833 u32 nvaccess = tr32(NVRAM_ACCESS);
2834
2835 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2836 }
2837}
2838
2839/* tp->lock is held. */
2840static void tg3_disable_nvram_access(struct tg3 *tp)
2841{
63c3a66f 2842 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
ffbcfed4
MC
2843 u32 nvaccess = tr32(NVRAM_ACCESS);
2844
2845 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2846 }
2847}
2848
2849static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2850 u32 offset, u32 *val)
2851{
2852 u32 tmp;
2853 int i;
2854
2855 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2856 return -EINVAL;
2857
2858 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2859 EEPROM_ADDR_DEVID_MASK |
2860 EEPROM_ADDR_READ);
2861 tw32(GRC_EEPROM_ADDR,
2862 tmp |
2863 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2864 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2865 EEPROM_ADDR_ADDR_MASK) |
2866 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2867
2868 for (i = 0; i < 1000; i++) {
2869 tmp = tr32(GRC_EEPROM_ADDR);
2870
2871 if (tmp & EEPROM_ADDR_COMPLETE)
2872 break;
2873 msleep(1);
2874 }
2875 if (!(tmp & EEPROM_ADDR_COMPLETE))
2876 return -EBUSY;
2877
62cedd11
MC
2878 tmp = tr32(GRC_EEPROM_DATA);
2879
2880 /*
2881 * The data will always be opposite the native endian
2882 * format. Perform a blind byteswap to compensate.
2883 */
2884 *val = swab32(tmp);
2885
ffbcfed4
MC
2886 return 0;
2887}
2888
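/* Command completion is polled every 10us, so 10000 iterations
 * bounds a single NVRAM command at roughly 100ms.
 */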
2889#define NVRAM_CMD_TIMEOUT 10000
2890
2891static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2892{
2893 int i;
2894
2895 tw32(NVRAM_CMD, nvram_cmd);
2896 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2897 udelay(10);
2898 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2899 udelay(10);
2900 break;
2901 }
2902 }
2903
2904 if (i == NVRAM_CMD_TIMEOUT)
2905 return -EBUSY;
2906
2907 return 0;
2908}
2909
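/* Translate a linear NVRAM offset into the page/byte form used
 * by Atmel AT45DB0X1B-style flashes, whose page size is not a
 * power of two. For example, with 264-byte pages a linear
 * offset of 1000 maps to page 3, byte 208: (3 << 9) + 208.
 */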
2910static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2911{
63c3a66f
JP
2912 if (tg3_flag(tp, NVRAM) &&
2913 tg3_flag(tp, NVRAM_BUFFERED) &&
2914 tg3_flag(tp, FLASH) &&
2915 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
ffbcfed4
MC
2916 (tp->nvram_jedecnum == JEDEC_ATMEL))
2917
2918 addr = ((addr / tp->nvram_pagesize) <<
2919 ATMEL_AT45DB0X1B_PAGE_POS) +
2920 (addr % tp->nvram_pagesize);
2921
2922 return addr;
2923}
2924
2925static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2926{
63c3a66f
JP
2927 if (tg3_flag(tp, NVRAM) &&
2928 tg3_flag(tp, NVRAM_BUFFERED) &&
2929 tg3_flag(tp, FLASH) &&
2930 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
ffbcfed4
MC
2931 (tp->nvram_jedecnum == JEDEC_ATMEL))
2932
2933 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2934 tp->nvram_pagesize) +
2935 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2936
2937 return addr;
2938}
2939
e4f34110
MC
2940/* NOTE: Data read in from NVRAM is byteswapped according to
2941 * the byteswapping settings for all other register accesses.
2942 * tg3 devices are BE devices, so on a BE machine, the data
2943 * returned will be exactly as it is seen in NVRAM. On a LE
2944 * machine, the 32-bit value will be byteswapped.
2945 */
ffbcfed4
MC
2946static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2947{
2948 int ret;
2949
63c3a66f 2950 if (!tg3_flag(tp, NVRAM))
ffbcfed4
MC
2951 return tg3_nvram_read_using_eeprom(tp, offset, val);
2952
2953 offset = tg3_nvram_phys_addr(tp, offset);
2954
2955 if (offset > NVRAM_ADDR_MSK)
2956 return -EINVAL;
2957
2958 ret = tg3_nvram_lock(tp);
2959 if (ret)
2960 return ret;
2961
2962 tg3_enable_nvram_access(tp);
2963
2964 tw32(NVRAM_ADDR, offset);
2965 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2966 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2967
2968 if (ret == 0)
e4f34110 2969 *val = tr32(NVRAM_RDDATA);
ffbcfed4
MC
2970
2971 tg3_disable_nvram_access(tp);
2972
2973 tg3_nvram_unlock(tp);
2974
2975 return ret;
2976}
2977
a9dc529d
MC
2978/* Ensures NVRAM data is in bytestream format. */
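/* For example, a MAC address stored in NVRAM reads back with
 * identical byte ordering on little- and big-endian hosts when
 * fetched through this helper.
 */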
2979static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
ffbcfed4
MC
2980{
2981 u32 v;
a9dc529d 2982 int res = tg3_nvram_read(tp, offset, &v);
ffbcfed4 2983 if (!res)
a9dc529d 2984 *val = cpu_to_be32(v);
ffbcfed4
MC
2985 return res;
2986}
2987
dbe9b92a
MC
2988static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
2989 u32 offset, u32 len, u8 *buf)
2990{
2991 int i, j, rc = 0;
2992 u32 val;
2993
2994 for (i = 0; i < len; i += 4) {
2995 u32 addr;
2996 __be32 data;
2997
2998 addr = offset + i;
2999
3000 memcpy(&data, buf + i, 4);
3001
3002 /*
3003 * The SEEPROM interface expects the data to always be opposite
3004 * the native endian format. We accomplish this by reversing
3005 * all the operations that would have been performed on the
3006 * data from a call to tg3_nvram_read_be32().
3007 */
3008 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3009
3010 val = tr32(GRC_EEPROM_ADDR);
3011 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3012
3013 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3014 EEPROM_ADDR_READ);
3015 tw32(GRC_EEPROM_ADDR, val |
3016 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3017 (addr & EEPROM_ADDR_ADDR_MASK) |
3018 EEPROM_ADDR_START |
3019 EEPROM_ADDR_WRITE);
3020
3021 for (j = 0; j < 1000; j++) {
3022 val = tr32(GRC_EEPROM_ADDR);
3023
3024 if (val & EEPROM_ADDR_COMPLETE)
3025 break;
3026 msleep(1);
3027 }
3028 if (!(val & EEPROM_ADDR_COMPLETE)) {
3029 rc = -EBUSY;
3030 break;
3031 }
3032 }
3033
3034 return rc;
3035}
3036
3037/* offset and length are dword aligned */
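/* Unbuffered flash parts are written a page at a time: read
 * the whole page into a scratch buffer, merge in the new data,
 * erase the page, then program it back one word at a time.
 */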
3038static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3039 u8 *buf)
3040{
3041 int ret = 0;
3042 u32 pagesize = tp->nvram_pagesize;
3043 u32 pagemask = pagesize - 1;
3044 u32 nvram_cmd;
3045 u8 *tmp;
3046
3047 tmp = kmalloc(pagesize, GFP_KERNEL);
3048 if (tmp == NULL)
3049 return -ENOMEM;
3050
3051 while (len) {
3052 int j;
3053 u32 phy_addr, page_off, size;
3054
3055 phy_addr = offset & ~pagemask;
3056
3057 for (j = 0; j < pagesize; j += 4) {
3058 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3059 (__be32 *) (tmp + j));
3060 if (ret)
3061 break;
3062 }
3063 if (ret)
3064 break;
3065
3066 page_off = offset & pagemask;
3067 size = pagesize;
3068 if (len < size)
3069 size = len;
3070
3071 len -= size;
3072
3073 memcpy(tmp + page_off, buf, size);
3074
3075 offset = offset + (pagesize - page_off);
3076
3077 tg3_enable_nvram_access(tp);
3078
3079 /*
3080 * Before we can erase the flash page, we need
3081 * to issue a special "write enable" command.
3082 */
3083 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3084
3085 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3086 break;
3087
3088 /* Erase the target page */
3089 tw32(NVRAM_ADDR, phy_addr);
3090
3091 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3092 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3093
3094 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3095 break;
3096
3097 /* Issue another write enable to start the write. */
3098 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3099
3100 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3101 break;
3102
3103 for (j = 0; j < pagesize; j += 4) {
3104 __be32 data;
3105
3106 data = *((__be32 *) (tmp + j));
3107
3108 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3109
3110 tw32(NVRAM_ADDR, phy_addr + j);
3111
3112 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3113 NVRAM_CMD_WR;
3114
3115 if (j == 0)
3116 nvram_cmd |= NVRAM_CMD_FIRST;
3117 else if (j == (pagesize - 4))
3118 nvram_cmd |= NVRAM_CMD_LAST;
3119
3120 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3121 if (ret)
3122 break;
3123 }
3124 if (ret)
3125 break;
3126 }
3127
3128 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3129 tg3_nvram_exec_cmd(tp, nvram_cmd);
3130
3131 kfree(tmp);
3132
3133 return ret;
3134}
3135
3136/* offset and length are dword aligned */
3137static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3138 u8 *buf)
3139{
3140 int i, ret = 0;
3141
3142 for (i = 0; i < len; i += 4, offset += 4) {
3143 u32 page_off, phy_addr, nvram_cmd;
3144 __be32 data;
3145
3146 memcpy(&data, buf + i, 4);
3147 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3148
3149 page_off = offset % tp->nvram_pagesize;
3150
3151 phy_addr = tg3_nvram_phys_addr(tp, offset);
3152
dbe9b92a
MC
3153 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3154
3155 if (page_off == 0 || i == 0)
3156 nvram_cmd |= NVRAM_CMD_FIRST;
3157 if (page_off == (tp->nvram_pagesize - 4))
3158 nvram_cmd |= NVRAM_CMD_LAST;
3159
3160 if (i == (len - 4))
3161 nvram_cmd |= NVRAM_CMD_LAST;
3162
42278224
MC
3163 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3164 !tg3_flag(tp, FLASH) ||
3165 !tg3_flag(tp, 57765_PLUS))
3166 tw32(NVRAM_ADDR, phy_addr);
3167
dbe9b92a
MC
3168 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
3169 !tg3_flag(tp, 5755_PLUS) &&
3170 (tp->nvram_jedecnum == JEDEC_ST) &&
3171 (nvram_cmd & NVRAM_CMD_FIRST)) {
3172 u32 cmd;
3173
3174 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3175 ret = tg3_nvram_exec_cmd(tp, cmd);
3176 if (ret)
3177 break;
3178 }
3179 if (!tg3_flag(tp, FLASH)) {
3180 /* We always do complete word writes to eeprom. */
3181 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3182 }
3183
3184 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3185 if (ret)
3186 break;
3187 }
3188 return ret;
3189}
3190
3191/* offset and length are dword aligned */
3192static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3193{
3194 int ret;
3195
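	/* GPIO1 drives the EEPROM write-protect pin on these
	 * boards, so deassert it for the duration of the write.
	 */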
3196 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3197 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3198 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3199 udelay(40);
3200 }
3201
3202 if (!tg3_flag(tp, NVRAM)) {
3203 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3204 } else {
3205 u32 grc_mode;
3206
3207 ret = tg3_nvram_lock(tp);
3208 if (ret)
3209 return ret;
3210
3211 tg3_enable_nvram_access(tp);
3212 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3213 tw32(NVRAM_WRITE1, 0x406);
3214
3215 grc_mode = tr32(GRC_MODE);
3216 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3217
3218 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3219 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3220 buf);
3221 } else {
3222 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3223 buf);
3224 }
3225
3226 grc_mode = tr32(GRC_MODE);
3227 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3228
3229 tg3_disable_nvram_access(tp);
3230 tg3_nvram_unlock(tp);
3231 }
3232
3233 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3234 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3235 udelay(40);
3236 }
3237
3238 return ret;
3239}
3240
997b4f13
MC
3241#define RX_CPU_SCRATCH_BASE 0x30000
3242#define RX_CPU_SCRATCH_SIZE 0x04000
3243#define TX_CPU_SCRATCH_BASE 0x34000
3244#define TX_CPU_SCRATCH_SIZE 0x04000
3245
3246/* tp->lock is held. */
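/* Halt the RX or TX embedded CPU. The 5906 exposes a single
 * VCPU halt bit; other chips need the HALT mode bit rewritten
 * until the CPU acknowledges it.
 */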
3247static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3248{
3249 int i;
3250
3251 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3252
3253 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3254 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3255
3256 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3257 return 0;
3258 }
3259 if (offset == RX_CPU_BASE) {
3260 for (i = 0; i < 10000; i++) {
3261 tw32(offset + CPU_STATE, 0xffffffff);
3262 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3263 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3264 break;
3265 }
3266
3267 tw32(offset + CPU_STATE, 0xffffffff);
3268 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
3269 udelay(10);
3270 } else {
3271 for (i = 0; i < 10000; i++) {
3272 tw32(offset + CPU_STATE, 0xffffffff);
3273 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3274 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3275 break;
3276 }
3277 }
3278
3279 if (i >= 10000) {
3280 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3281 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3282 return -ENODEV;
3283 }
3284
3285 /* Clear firmware's nvram arbitration. */
3286 if (tg3_flag(tp, NVRAM))
3287 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3288 return 0;
3289}
3290
3291struct fw_info {
3292 unsigned int fw_base;
3293 unsigned int fw_len;
3294 const __be32 *fw_data;
3295};
3296
3297/* tp->lock is held. */
3298static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3299 u32 cpu_scratch_base, int cpu_scratch_size,
3300 struct fw_info *info)
3301{
3302 int err, lock_err, i;
3303 void (*write_op)(struct tg3 *, u32, u32);
3304
3305 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3306 netdev_err(tp->dev,
3307 "%s: Trying to load TX cpu firmware which is 5705\n",
3308 __func__);
3309 return -EINVAL;
3310 }
3311
3312 if (tg3_flag(tp, 5705_PLUS))
3313 write_op = tg3_write_mem;
3314 else
3315 write_op = tg3_write_indirect_reg32;
3316
3317 /* It is possible that bootcode is still loading at this point.
3318 * Get the nvram lock first before halting the cpu.
3319 */
3320 lock_err = tg3_nvram_lock(tp);
3321 err = tg3_halt_cpu(tp, cpu_base);
3322 if (!lock_err)
3323 tg3_nvram_unlock(tp);
3324 if (err)
3325 goto out;
3326
3327 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3328 write_op(tp, cpu_scratch_base + i, 0);
3329 tw32(cpu_base + CPU_STATE, 0xffffffff);
3330 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3331 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3332 write_op(tp, (cpu_scratch_base +
3333 (info->fw_base & 0xffff) +
3334 (i * sizeof(u32))),
3335 be32_to_cpu(info->fw_data[i]));
3336
3337 err = 0;
3338
3339out:
3340 return err;
3341}
3342
3343/* tp->lock is held. */
3344static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3345{
3346 struct fw_info info;
3347 const __be32 *fw_data;
3348 int err, i;
3349
3350 fw_data = (void *)tp->fw->data;
3351
3352	/* Firmware blob starts with version numbers, followed by
3353	 * start address and length. We are setting complete length.
3354	 * length = end_address_of_bss - start_address_of_text.
3355	 * Remainder is the blob to be loaded contiguously
3356	 * from start address. */
3357
3358 info.fw_base = be32_to_cpu(fw_data[1]);
3359 info.fw_len = tp->fw->size - 12;
3360 info.fw_data = &fw_data[3];
3361
3362 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3363 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3364 &info);
3365 if (err)
3366 return err;
3367
3368 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3369 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3370 &info);
3371 if (err)
3372 return err;
3373
3374 /* Now startup only the RX cpu. */
3375 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3376 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3377
3378 for (i = 0; i < 5; i++) {
3379 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3380 break;
3381 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3382 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3383 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3384 udelay(1000);
3385 }
3386 if (i >= 5) {
3387 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3388 "should be %08x\n", __func__,
3389 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3390 return -ENODEV;
3391 }
3392 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3393 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
3394
3395 return 0;
3396}
3397
3398/* tp->lock is held. */
3399static int tg3_load_tso_firmware(struct tg3 *tp)
3400{
3401 struct fw_info info;
3402 const __be32 *fw_data;
3403 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3404 int err, i;
3405
3406 if (tg3_flag(tp, HW_TSO_1) ||
3407 tg3_flag(tp, HW_TSO_2) ||
3408 tg3_flag(tp, HW_TSO_3))
3409 return 0;
3410
3411 fw_data = (void *)tp->fw->data;
3412
3413	/* Firmware blob starts with version numbers, followed by
3414	 * start address and length. We are setting complete length.
3415	 * length = end_address_of_bss - start_address_of_text.
3416	 * Remainder is the blob to be loaded contiguously
3417	 * from start address. */
3418
3419 info.fw_base = be32_to_cpu(fw_data[1]);
3420 cpu_scratch_size = tp->fw_len;
3421 info.fw_len = tp->fw->size - 12;
3422 info.fw_data = &fw_data[3];
3423
3424 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3425 cpu_base = RX_CPU_BASE;
3426 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3427 } else {
3428 cpu_base = TX_CPU_BASE;
3429 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3430 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3431 }
3432
3433 err = tg3_load_firmware_cpu(tp, cpu_base,
3434 cpu_scratch_base, cpu_scratch_size,
3435 &info);
3436 if (err)
3437 return err;
3438
3439 /* Now startup the cpu. */
3440 tw32(cpu_base + CPU_STATE, 0xffffffff);
3441 tw32_f(cpu_base + CPU_PC, info.fw_base);
3442
3443 for (i = 0; i < 5; i++) {
3444 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3445 break;
3446 tw32(cpu_base + CPU_STATE, 0xffffffff);
3447 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3448 tw32_f(cpu_base + CPU_PC, info.fw_base);
3449 udelay(1000);
3450 }
3451 if (i >= 5) {
3452 netdev_err(tp->dev,
3453 "%s fails to set CPU PC, is %08x should be %08x\n",
3454 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3455 return -ENODEV;
3456 }
3457 tw32(cpu_base + CPU_STATE, 0xffffffff);
3458 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3459 return 0;
3460}
3461
3462
3f007891
MC
3463/* tp->lock is held. */
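/* Program the station address into all four MAC address slots
 * (plus the twelve extended slots on 5703/5704) and reseed the
 * transmit backoff generator from the address bytes.
 */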
3464static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3465{
3466 u32 addr_high, addr_low;
3467 int i;
3468
3469 addr_high = ((tp->dev->dev_addr[0] << 8) |
3470 tp->dev->dev_addr[1]);
3471 addr_low = ((tp->dev->dev_addr[2] << 24) |
3472 (tp->dev->dev_addr[3] << 16) |
3473 (tp->dev->dev_addr[4] << 8) |
3474 (tp->dev->dev_addr[5] << 0));
3475 for (i = 0; i < 4; i++) {
3476 if (i == 1 && skip_mac_1)
3477 continue;
3478 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3479 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3480 }
3481
3482 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3483 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3484 for (i = 0; i < 12; i++) {
3485 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3486 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3487 }
3488 }
3489
3490 addr_high = (tp->dev->dev_addr[0] +
3491 tp->dev->dev_addr[1] +
3492 tp->dev->dev_addr[2] +
3493 tp->dev->dev_addr[3] +
3494 tp->dev->dev_addr[4] +
3495 tp->dev->dev_addr[5]) &
3496 TX_BACKOFF_SEED_MASK;
3497 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3498}
3499
c866b7ea 3500static void tg3_enable_register_access(struct tg3 *tp)
1da177e4 3501{
c866b7ea
RW
3502 /*
3503 * Make sure register accesses (indirect or otherwise) will function
3504 * correctly.
1da177e4
LT
3505 */
3506 pci_write_config_dword(tp->pdev,
c866b7ea
RW
3507 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3508}
1da177e4 3509
c866b7ea
RW
3510static int tg3_power_up(struct tg3 *tp)
3511{
bed9829f 3512 int err;
8c6bda1a 3513
bed9829f 3514 tg3_enable_register_access(tp);
1da177e4 3515
bed9829f
MC
3516 err = pci_set_power_state(tp->pdev, PCI_D0);
3517 if (!err) {
3518 /* Switch out of Vaux if it is a NIC */
3519 tg3_pwrsrc_switch_to_vmain(tp);
3520 } else {
3521 netdev_err(tp->dev, "Transition to D0 failed\n");
3522 }
1da177e4 3523
bed9829f 3524 return err;
c866b7ea 3525}
1da177e4 3526
4b409522
MC
3527static int tg3_setup_phy(struct tg3 *, int);
3528
c866b7ea
RW
3529static int tg3_power_down_prepare(struct tg3 *tp)
3530{
3531 u32 misc_host_ctrl;
3532 bool device_should_wake, do_low_power;
3533
3534 tg3_enable_register_access(tp);
5e7dfd0f
MC
3535
3536 /* Restore the CLKREQ setting. */
63c3a66f 3537 if (tg3_flag(tp, CLKREQ_BUG)) {
5e7dfd0f
MC
3538 u16 lnkctl;
3539
3540 pci_read_config_word(tp->pdev,
708ebb3a 3541 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
5e7dfd0f
MC
3542 &lnkctl);
3543 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3544 pci_write_config_word(tp->pdev,
708ebb3a 3545 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
5e7dfd0f
MC
3546 lnkctl);
3547 }
3548
1da177e4
LT
3549 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3550 tw32(TG3PCI_MISC_HOST_CTRL,
3551 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3552
c866b7ea 3553 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
63c3a66f 3554 tg3_flag(tp, WOL_ENABLE);
05ac4cb7 3555
63c3a66f 3556 if (tg3_flag(tp, USE_PHYLIB)) {
0a459aac 3557 do_low_power = false;
f07e9af3 3558 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
80096068 3559 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
b02fd9e3 3560 struct phy_device *phydev;
0a459aac 3561 u32 phyid, advertising;
b02fd9e3 3562
3f0e3ad7 3563 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
b02fd9e3 3564
80096068 3565 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
b02fd9e3
MC
3566
3567 tp->link_config.orig_speed = phydev->speed;
3568 tp->link_config.orig_duplex = phydev->duplex;
3569 tp->link_config.orig_autoneg = phydev->autoneg;
3570 tp->link_config.orig_advertising = phydev->advertising;
3571
3572 advertising = ADVERTISED_TP |
3573 ADVERTISED_Pause |
3574 ADVERTISED_Autoneg |
3575 ADVERTISED_10baseT_Half;
3576
63c3a66f
JP
3577 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3578 if (tg3_flag(tp, WOL_SPEED_100MB))
b02fd9e3
MC
3579 advertising |=
3580 ADVERTISED_100baseT_Half |
3581 ADVERTISED_100baseT_Full |
3582 ADVERTISED_10baseT_Full;
3583 else
3584 advertising |= ADVERTISED_10baseT_Full;
3585 }
3586
3587 phydev->advertising = advertising;
3588
3589 phy_start_aneg(phydev);
0a459aac
MC
3590
3591 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
6a443a0f
MC
3592 if (phyid != PHY_ID_BCMAC131) {
3593 phyid &= PHY_BCM_OUI_MASK;
3594 if (phyid == PHY_BCM_OUI_1 ||
3595 phyid == PHY_BCM_OUI_2 ||
3596 phyid == PHY_BCM_OUI_3)
0a459aac
MC
3597 do_low_power = true;
3598 }
b02fd9e3 3599 }
dd477003 3600 } else {
2023276e 3601 do_low_power = true;
0a459aac 3602
80096068
MC
3603 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3604 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
dd477003
MC
3605 tp->link_config.orig_speed = tp->link_config.speed;
3606 tp->link_config.orig_duplex = tp->link_config.duplex;
3607 tp->link_config.orig_autoneg = tp->link_config.autoneg;
3608 }
1da177e4 3609
f07e9af3 3610 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
dd477003
MC
3611 tp->link_config.speed = SPEED_10;
3612 tp->link_config.duplex = DUPLEX_HALF;
3613 tp->link_config.autoneg = AUTONEG_ENABLE;
3614 tg3_setup_phy(tp, 0);
3615 }
1da177e4
LT
3616 }
3617
b5d3772c
MC
3618 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3619 u32 val;
3620
3621 val = tr32(GRC_VCPU_EXT_CTRL);
3622 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
63c3a66f 3623 } else if (!tg3_flag(tp, ENABLE_ASF)) {
6921d201
MC
3624 int i;
3625 u32 val;
3626
3627 for (i = 0; i < 200; i++) {
3628 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3629 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3630 break;
3631 msleep(1);
3632 }
3633 }
63c3a66f 3634 if (tg3_flag(tp, WOL_CAP))
a85feb8c
GZ
3635 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3636 WOL_DRV_STATE_SHUTDOWN |
3637 WOL_DRV_WOL |
3638 WOL_SET_MAGIC_PKT);
6921d201 3639
05ac4cb7 3640 if (device_should_wake) {
1da177e4
LT
3641 u32 mac_mode;
3642
f07e9af3 3643 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
b4bd2929
MC
3644 if (do_low_power &&
3645 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3646 tg3_phy_auxctl_write(tp,
3647 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3648 MII_TG3_AUXCTL_PCTL_WOL_EN |
3649 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3650 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
dd477003
MC
3651 udelay(40);
3652 }
1da177e4 3653
f07e9af3 3654 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3f7045c1
MC
3655 mac_mode = MAC_MODE_PORT_MODE_GMII;
3656 else
3657 mac_mode = MAC_MODE_PORT_MODE_MII;
1da177e4 3658
e8f3f6ca
MC
3659 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3660 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3661 ASIC_REV_5700) {
63c3a66f 3662 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
e8f3f6ca
MC
3663 SPEED_100 : SPEED_10;
3664 if (tg3_5700_link_polarity(tp, speed))
3665 mac_mode |= MAC_MODE_LINK_POLARITY;
3666 else
3667 mac_mode &= ~MAC_MODE_LINK_POLARITY;
3668 }
1da177e4
LT
3669 } else {
3670 mac_mode = MAC_MODE_PORT_MODE_TBI;
3671 }
3672
63c3a66f 3673 if (!tg3_flag(tp, 5750_PLUS))
1da177e4
LT
3674 tw32(MAC_LED_CTRL, tp->led_ctrl);
3675
05ac4cb7 3676 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
63c3a66f
JP
3677 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3678 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
05ac4cb7 3679 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
1da177e4 3680
63c3a66f 3681 if (tg3_flag(tp, ENABLE_APE))
d2394e6b
MC
3682 mac_mode |= MAC_MODE_APE_TX_EN |
3683 MAC_MODE_APE_RX_EN |
3684 MAC_MODE_TDE_ENABLE;
3bda1258 3685
1da177e4
LT
3686 tw32_f(MAC_MODE, mac_mode);
3687 udelay(100);
3688
3689 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3690 udelay(10);
3691 }
3692
63c3a66f 3693 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
1da177e4
LT
3694 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3695 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3696 u32 base_val;
3697
3698 base_val = tp->pci_clock_ctrl;
3699 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3700 CLOCK_CTRL_TXCLK_DISABLE);
3701
b401e9e2
MC
3702 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3703 CLOCK_CTRL_PWRDOWN_PLL133, 40);
63c3a66f
JP
3704 } else if (tg3_flag(tp, 5780_CLASS) ||
3705 tg3_flag(tp, CPMU_PRESENT) ||
6ff6f81d 3706 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
4cf78e4f 3707 /* do nothing */
63c3a66f 3708 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
1da177e4
LT
3709 u32 newbits1, newbits2;
3710
3711 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3712 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3713 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3714 CLOCK_CTRL_TXCLK_DISABLE |
3715 CLOCK_CTRL_ALTCLK);
3716 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
63c3a66f 3717 } else if (tg3_flag(tp, 5705_PLUS)) {
1da177e4
LT
3718 newbits1 = CLOCK_CTRL_625_CORE;
3719 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3720 } else {
3721 newbits1 = CLOCK_CTRL_ALTCLK;
3722 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3723 }
3724
b401e9e2
MC
3725 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3726 40);
1da177e4 3727
b401e9e2
MC
3728 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3729 40);
1da177e4 3730
63c3a66f 3731 if (!tg3_flag(tp, 5705_PLUS)) {
1da177e4
LT
3732 u32 newbits3;
3733
3734 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3735 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3736 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3737 CLOCK_CTRL_TXCLK_DISABLE |
3738 CLOCK_CTRL_44MHZ_CORE);
3739 } else {
3740 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3741 }
3742
b401e9e2
MC
3743 tw32_wait_f(TG3PCI_CLOCK_CTRL,
3744 tp->pci_clock_ctrl | newbits3, 40);
1da177e4
LT
3745 }
3746 }
3747
63c3a66f 3748 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
0a459aac 3749 tg3_power_down_phy(tp, do_low_power);
6921d201 3750
cd0d7228 3751 tg3_frob_aux_power(tp, true);
1da177e4
LT
3752
3753 /* Workaround for unstable PLL clock */
3754 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3755 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3756 u32 val = tr32(0x7d00);
3757
3758 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3759 tw32(0x7d00, val);
63c3a66f 3760 if (!tg3_flag(tp, ENABLE_ASF)) {
ec41c7df
MC
3761 int err;
3762
3763 err = tg3_nvram_lock(tp);
1da177e4 3764 tg3_halt_cpu(tp, RX_CPU_BASE);
ec41c7df
MC
3765 if (!err)
3766 tg3_nvram_unlock(tp);
6921d201 3767 }
1da177e4
LT
3768 }
3769
bbadf503
MC
3770 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3771
c866b7ea
RW
3772 return 0;
3773}
12dac075 3774
c866b7ea
RW
3775static void tg3_power_down(struct tg3 *tp)
3776{
3777 tg3_power_down_prepare(tp);
1da177e4 3778
63c3a66f 3779 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
c866b7ea 3780 pci_set_power_state(tp->pdev, PCI_D3hot);
1da177e4
LT
3781}
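/* Note on the ordering above: tg3_power_down_prepare() does the
 * driver-side work first (programming the WoL mailbox and gating the
 * clocks), pci_wake_from_d3() then arms PME while the device is still
 * in D0, and only after that does pci_set_power_state() drop the
 * device into D3hot. Enabling the wake source before the power-state
 * transition is the conventional PCI suspend pattern.
 */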
3782
1da177e4
LT
3783static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3784{
3785 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3786 case MII_TG3_AUX_STAT_10HALF:
3787 *speed = SPEED_10;
3788 *duplex = DUPLEX_HALF;
3789 break;
3790
3791 case MII_TG3_AUX_STAT_10FULL:
3792 *speed = SPEED_10;
3793 *duplex = DUPLEX_FULL;
3794 break;
3795
3796 case MII_TG3_AUX_STAT_100HALF:
3797 *speed = SPEED_100;
3798 *duplex = DUPLEX_HALF;
3799 break;
3800
3801 case MII_TG3_AUX_STAT_100FULL:
3802 *speed = SPEED_100;
3803 *duplex = DUPLEX_FULL;
3804 break;
3805
3806 case MII_TG3_AUX_STAT_1000HALF:
3807 *speed = SPEED_1000;
3808 *duplex = DUPLEX_HALF;
3809 break;
3810
3811 case MII_TG3_AUX_STAT_1000FULL:
3812 *speed = SPEED_1000;
3813 *duplex = DUPLEX_FULL;
3814 break;
3815
3816 default:
f07e9af3 3817 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
715116a1
MC
3818 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3819 SPEED_10;
3820 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3821 DUPLEX_HALF;
3822 break;
3823 }
1da177e4
LT
3824 *speed = SPEED_INVALID;
3825 *duplex = DUPLEX_INVALID;
3826 break;
855e1111 3827 }
1da177e4
LT
3828}
3829
42b64a45 3830static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
1da177e4 3831{
42b64a45
MC
3832 int err = 0;
3833 u32 val, new_adv;
1da177e4 3834
42b64a45 3835 new_adv = ADVERTISE_CSMA;
202ff1c2 3836 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
f88788f0 3837 new_adv |= mii_advertise_flowctrl(flowctrl);
1da177e4 3838
42b64a45
MC
3839 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3840 if (err)
3841 goto done;
ba4d07a8 3842
4f272096
MC
3843 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3844 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
ba4d07a8 3845
4f272096
MC
3846 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3847 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3848 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
ba4d07a8 3849
4f272096
MC
3850 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3851 if (err)
3852 goto done;
3853 }
1da177e4 3854
42b64a45
MC
3855 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3856 goto done;
52b02d04 3857
42b64a45
MC
3858 tw32(TG3_CPMU_EEE_MODE,
3859 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
52b02d04 3860
42b64a45
MC
3861 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3862 if (!err) {
3863 u32 err2;
52b02d04 3864
b715ce94
MC
3865 val = 0;
3866 /* Advertise 100-BaseTX EEE ability */
3867 if (advertise & ADVERTISED_100baseT_Full)
3868 val |= MDIO_AN_EEE_ADV_100TX;
3869 /* Advertise 1000-BaseT EEE ability */
3870 if (advertise & ADVERTISED_1000baseT_Full)
3871 val |= MDIO_AN_EEE_ADV_1000T;
3872 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3873 if (err)
3874 val = 0;
3875
21a00ab2
MC
3876 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3877 case ASIC_REV_5717:
3878 case ASIC_REV_57765:
55086ad9 3879 case ASIC_REV_57766:
21a00ab2 3880 case ASIC_REV_5719:
b715ce94
MC
3881 /* If we advertised any EEE abilities above... */
3882 if (val)
3883 val = MII_TG3_DSP_TAP26_ALNOKO |
3884 MII_TG3_DSP_TAP26_RMRXSTO |
3885 MII_TG3_DSP_TAP26_OPCSINPT;
21a00ab2 3886 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
be671947
MC
3887 /* Fall through */
3888 case ASIC_REV_5720:
3889 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3890 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3891 MII_TG3_DSP_CH34TP2_HIBW01);
21a00ab2 3892 }
52b02d04 3893
42b64a45
MC
3894 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3895 if (!err)
3896 err = err2;
3897 }
3898
3899done:
3900 return err;
3901}
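/* Illustrative sketch (example values, not driver logic): the
 * <linux/mii.h> helpers used above translate ethtool ADVERTISED_* bits
 * into MII advertisement register bits, e.g.:
 *
 *	u32 adv = ADVERTISED_10baseT_Half | ADVERTISED_100baseT_Full;
 *	u32 mii = ethtool_adv_to_mii_adv_t(adv);
 *	// mii == ADVERTISE_10HALF | ADVERTISE_100FULL
 *
 * The 1000BASE-T bits live in a separate PHY register, which is why
 * gigabit-capable devices get a second step that converts with
 * ethtool_adv_to_mii_ctrl1000_t() and writes MII_CTRL1000.
 */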
3902
3903static void tg3_phy_copper_begin(struct tg3 *tp)
3904{
3905 u32 new_adv;
3906 int i;
3907
3908 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3909 new_adv = ADVERTISED_10baseT_Half |
3910 ADVERTISED_10baseT_Full;
3911 if (tg3_flag(tp, WOL_SPEED_100MB))
3912 new_adv |= ADVERTISED_100baseT_Half |
3913 ADVERTISED_100baseT_Full;
3914
3915 tg3_phy_autoneg_cfg(tp, new_adv,
3916 FLOW_CTRL_TX | FLOW_CTRL_RX);
3917 } else if (tp->link_config.speed == SPEED_INVALID) {
3918 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3919 tp->link_config.advertising &=
3920 ~(ADVERTISED_1000baseT_Half |
3921 ADVERTISED_1000baseT_Full);
3922
3923 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3924 tp->link_config.flowctrl);
3925 } else {
3926 /* Asking for a specific link mode. */
3927 if (tp->link_config.speed == SPEED_1000) {
3928 if (tp->link_config.duplex == DUPLEX_FULL)
3929 new_adv = ADVERTISED_1000baseT_Full;
3930 else
3931 new_adv = ADVERTISED_1000baseT_Half;
3932 } else if (tp->link_config.speed == SPEED_100) {
3933 if (tp->link_config.duplex == DUPLEX_FULL)
3934 new_adv = ADVERTISED_100baseT_Full;
3935 else
3936 new_adv = ADVERTISED_100baseT_Half;
3937 } else {
3938 if (tp->link_config.duplex == DUPLEX_FULL)
3939 new_adv = ADVERTISED_10baseT_Full;
3940 else
3941 new_adv = ADVERTISED_10baseT_Half;
52b02d04 3942 }
52b02d04 3943
42b64a45
MC
3944 tg3_phy_autoneg_cfg(tp, new_adv,
3945 tp->link_config.flowctrl);
52b02d04
MC
3946 }
3947
1da177e4
LT
3948 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3949 tp->link_config.speed != SPEED_INVALID) {
3950 u32 bmcr, orig_bmcr;
3951
3952 tp->link_config.active_speed = tp->link_config.speed;
3953 tp->link_config.active_duplex = tp->link_config.duplex;
3954
3955 bmcr = 0;
3956 switch (tp->link_config.speed) {
3957 default:
3958 case SPEED_10:
3959 break;
3960
3961 case SPEED_100:
3962 bmcr |= BMCR_SPEED100;
3963 break;
3964
3965 case SPEED_1000:
221c5637 3966 bmcr |= BMCR_SPEED1000;
1da177e4 3967 break;
855e1111 3968 }
1da177e4
LT
3969
3970 if (tp->link_config.duplex == DUPLEX_FULL)
3971 bmcr |= BMCR_FULLDPLX;
3972
3973 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3974 (bmcr != orig_bmcr)) {
3975 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3976 for (i = 0; i < 1500; i++) {
3977 u32 tmp;
3978
3979 udelay(10);
3980 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3981 tg3_readphy(tp, MII_BMSR, &tmp))
3982 continue;
3983 if (!(tmp & BMSR_LSTATUS)) {
3984 udelay(40);
3985 break;
3986 }
3987 }
3988 tg3_writephy(tp, MII_BMCR, bmcr);
3989 udelay(40);
3990 }
3991 } else {
3992 tg3_writephy(tp, MII_BMCR,
3993 BMCR_ANENABLE | BMCR_ANRESTART);
3994 }
3995}
3996
3997static int tg3_init_5401phy_dsp(struct tg3 *tp)
3998{
3999 int err;
4000
4001 /* Turn off tap power management. */
4002 /* Set Extended packet length bit */
b4bd2929 4003 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
1da177e4 4004
6ee7c0a0
MC
4005 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4006 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4007 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4008 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4009 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
1da177e4
LT
4010
4011 udelay(40);
4012
4013 return err;
4014}
4015
e2bf73e7 4016static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
1da177e4 4017{
e2bf73e7 4018 u32 advmsk, tgtadv, advertising;
3600d918 4019
e2bf73e7
MC
4020 advertising = tp->link_config.advertising;
4021 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
1da177e4 4022
e2bf73e7
MC
4023 advmsk = ADVERTISE_ALL;
4024 if (tp->link_config.active_duplex == DUPLEX_FULL) {
f88788f0 4025 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
e2bf73e7
MC
4026 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4027 }
1da177e4 4028
e2bf73e7
MC
4029 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4030 return false;
4031
4032 if ((*lcladv & advmsk) != tgtadv)
4033 return false;
b99d2a57 4034
f07e9af3 4035 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1da177e4
LT
4036 u32 tg3_ctrl;
4037
e2bf73e7 4038 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
3600d918 4039
221c5637 4040 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
e2bf73e7 4041 return false;
1da177e4 4042
3198e07f
MC
4043 if (tgtadv &&
4044 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4045 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4046 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4047 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4048 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4049 } else {
4050 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4051 }
4052
e2bf73e7
MC
4053 if (tg3_ctrl != tgtadv)
4054 return false;
ef167e27
MC
4055 }
4056
e2bf73e7 4057 return true;
ef167e27
MC
4058}
4059
859edb26
MC
4060static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4061{
4062 u32 lpeth = 0;
4063
4064 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4065 u32 val;
4066
4067 if (tg3_readphy(tp, MII_STAT1000, &val))
4068 return false;
4069
4070 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4071 }
4072
4073 if (tg3_readphy(tp, MII_LPA, rmtadv))
4074 return false;
4075
4076 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4077 tp->link_config.rmt_adv = lpeth;
4078
4079 return true;
4080}
4081
1da177e4
LT
4082static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4083{
4084 int current_link_up;
f833c4c1 4085 u32 bmsr, val;
ef167e27 4086 u32 lcl_adv, rmt_adv;
1da177e4
LT
4087 u16 current_speed;
4088 u8 current_duplex;
4089 int i, err;
4090
4091 tw32(MAC_EVENT, 0);
4092
4093 tw32_f(MAC_STATUS,
4094 (MAC_STATUS_SYNC_CHANGED |
4095 MAC_STATUS_CFG_CHANGED |
4096 MAC_STATUS_MI_COMPLETION |
4097 MAC_STATUS_LNKSTATE_CHANGED));
4098 udelay(40);
4099
8ef21428
MC
4100 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4101 tw32_f(MAC_MI_MODE,
4102 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4103 udelay(80);
4104 }
1da177e4 4105
b4bd2929 4106 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
1da177e4
LT
4107
4108 /* Some third-party PHYs need to be reset on link going
4109 * down.
4110 */
4111 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4112 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4113 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
4114 netif_carrier_ok(tp->dev)) {
4115 tg3_readphy(tp, MII_BMSR, &bmsr);
4116 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4117 !(bmsr & BMSR_LSTATUS))
4118 force_reset = 1;
4119 }
4120 if (force_reset)
4121 tg3_phy_reset(tp);
4122
79eb6904 4123 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1da177e4
LT
4124 tg3_readphy(tp, MII_BMSR, &bmsr);
4125 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
63c3a66f 4126 !tg3_flag(tp, INIT_COMPLETE))
1da177e4
LT
4127 bmsr = 0;
4128
4129 if (!(bmsr & BMSR_LSTATUS)) {
4130 err = tg3_init_5401phy_dsp(tp);
4131 if (err)
4132 return err;
4133
4134 tg3_readphy(tp, MII_BMSR, &bmsr);
4135 for (i = 0; i < 1000; i++) {
4136 udelay(10);
4137 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4138 (bmsr & BMSR_LSTATUS)) {
4139 udelay(40);
4140 break;
4141 }
4142 }
4143
79eb6904
MC
4144 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4145 TG3_PHY_REV_BCM5401_B0 &&
1da177e4
LT
4146 !(bmsr & BMSR_LSTATUS) &&
4147 tp->link_config.active_speed == SPEED_1000) {
4148 err = tg3_phy_reset(tp);
4149 if (!err)
4150 err = tg3_init_5401phy_dsp(tp);
4151 if (err)
4152 return err;
4153 }
4154 }
4155 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4156 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
4157 /* 5701 {A0,B0} CRC bug workaround */
4158 tg3_writephy(tp, 0x15, 0x0a75);
f08aa1a8
MC
4159 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4160 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4161 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
1da177e4
LT
4162 }
4163
4164 /* Clear pending interrupts... */
f833c4c1
MC
4165 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4166 tg3_readphy(tp, MII_TG3_ISTAT, &val);
1da177e4 4167
f07e9af3 4168 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
1da177e4 4169 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
f07e9af3 4170 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
1da177e4
LT
4171 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4172
4173 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4174 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
4175 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4176 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4177 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4178 else
4179 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4180 }
4181
4182 current_link_up = 0;
4183 current_speed = SPEED_INVALID;
4184 current_duplex = DUPLEX_INVALID;
e348c5e7 4185 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
859edb26 4186 tp->link_config.rmt_adv = 0;
1da177e4 4187
f07e9af3 4188 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
15ee95c3
MC
4189 err = tg3_phy_auxctl_read(tp,
4190 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4191 &val);
4192 if (!err && !(val & (1 << 10))) {
b4bd2929
MC
4193 tg3_phy_auxctl_write(tp,
4194 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4195 val | (1 << 10));
1da177e4
LT
4196 goto relink;
4197 }
4198 }
4199
4200 bmsr = 0;
4201 for (i = 0; i < 100; i++) {
4202 tg3_readphy(tp, MII_BMSR, &bmsr);
4203 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4204 (bmsr & BMSR_LSTATUS))
4205 break;
4206 udelay(40);
4207 }
4208
4209 if (bmsr & BMSR_LSTATUS) {
4210 u32 aux_stat, bmcr;
4211
4212 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4213 for (i = 0; i < 2000; i++) {
4214 udelay(10);
4215 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4216 aux_stat)
4217 break;
4218 }
4219
4220 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4221 &current_speed,
4222 &current_duplex);
4223
4224 bmcr = 0;
4225 for (i = 0; i < 200; i++) {
4226 tg3_readphy(tp, MII_BMCR, &bmcr);
4227 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4228 continue;
4229 if (bmcr && bmcr != 0x7fff)
4230 break;
4231 udelay(10);
4232 }
4233
ef167e27
MC
4234 lcl_adv = 0;
4235 rmt_adv = 0;
1da177e4 4236
ef167e27
MC
4237 tp->link_config.active_speed = current_speed;
4238 tp->link_config.active_duplex = current_duplex;
4239
4240 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4241 if ((bmcr & BMCR_ANENABLE) &&
e2bf73e7 4242 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
859edb26 4243 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
e2bf73e7 4244 current_link_up = 1;
1da177e4
LT
4245 } else {
4246 if (!(bmcr & BMCR_ANENABLE) &&
4247 tp->link_config.speed == current_speed &&
ef167e27
MC
4248 tp->link_config.duplex == current_duplex &&
4249 tp->link_config.flowctrl ==
4250 tp->link_config.active_flowctrl) {
1da177e4 4251 current_link_up = 1;
1da177e4
LT
4252 }
4253 }
4254
ef167e27 4255 if (current_link_up == 1 &&
e348c5e7
MC
4256 tp->link_config.active_duplex == DUPLEX_FULL) {
4257 u32 reg, bit;
4258
4259 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4260 reg = MII_TG3_FET_GEN_STAT;
4261 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4262 } else {
4263 reg = MII_TG3_EXT_STAT;
4264 bit = MII_TG3_EXT_STAT_MDIX;
4265 }
4266
4267 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4268 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4269
ef167e27 4270 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
e348c5e7 4271 }
1da177e4
LT
4272 }
4273
1da177e4 4274relink:
80096068 4275 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
1da177e4
LT
4276 tg3_phy_copper_begin(tp);
4277
f833c4c1 4278 tg3_readphy(tp, MII_BMSR, &bmsr);
06c03c02
MB
4279 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4280 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
1da177e4
LT
4281 current_link_up = 1;
4282 }
4283
4284 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4285 if (current_link_up == 1) {
4286 if (tp->link_config.active_speed == SPEED_100 ||
4287 tp->link_config.active_speed == SPEED_10)
4288 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4289 else
4290 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
f07e9af3 4291 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7f97a4bd
MC
4292 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4293 else
1da177e4
LT
4294 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4295
4296 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4297 if (tp->link_config.active_duplex == DUPLEX_HALF)
4298 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4299
1da177e4 4300 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
e8f3f6ca
MC
4301 if (current_link_up == 1 &&
4302 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
1da177e4 4303 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
e8f3f6ca
MC
4304 else
4305 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1da177e4
LT
4306 }
4307
4308 /* ??? Without this setting Netgear GA302T PHY does not
4309 * ??? send/receive packets...
4310 */
79eb6904 4311 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
1da177e4
LT
4312 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4313 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4314 tw32_f(MAC_MI_MODE, tp->mi_mode);
4315 udelay(80);
4316 }
4317
4318 tw32_f(MAC_MODE, tp->mac_mode);
4319 udelay(40);
4320
52b02d04
MC
4321 tg3_phy_eee_adjust(tp, current_link_up);
4322
63c3a66f 4323 if (tg3_flag(tp, USE_LINKCHG_REG)) {
1da177e4
LT
4324 /* Polled via timer. */
4325 tw32_f(MAC_EVENT, 0);
4326 } else {
4327 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4328 }
4329 udelay(40);
4330
4331 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4332 current_link_up == 1 &&
4333 tp->link_config.active_speed == SPEED_1000 &&
63c3a66f 4334 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
1da177e4
LT
4335 udelay(120);
4336 tw32_f(MAC_STATUS,
4337 (MAC_STATUS_SYNC_CHANGED |
4338 MAC_STATUS_CFG_CHANGED));
4339 udelay(40);
4340 tg3_write_mem(tp,
4341 NIC_SRAM_FIRMWARE_MBOX,
4342 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4343 }
4344
5e7dfd0f 4345 /* Prevent send BD corruption. */
63c3a66f 4346 if (tg3_flag(tp, CLKREQ_BUG)) {
5e7dfd0f
MC
4347 u16 oldlnkctl, newlnkctl;
4348
4349 pci_read_config_word(tp->pdev,
708ebb3a 4350 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
5e7dfd0f
MC
4351 &oldlnkctl);
4352 if (tp->link_config.active_speed == SPEED_100 ||
4353 tp->link_config.active_speed == SPEED_10)
4354 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4355 else
4356 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4357 if (newlnkctl != oldlnkctl)
4358 pci_write_config_word(tp->pdev,
93a700a9
MC
4359 pci_pcie_cap(tp->pdev) +
4360 PCI_EXP_LNKCTL, newlnkctl);
5e7dfd0f
MC
4361 }
4362
1da177e4
LT
4363 if (current_link_up != netif_carrier_ok(tp->dev)) {
4364 if (current_link_up)
4365 netif_carrier_on(tp->dev);
4366 else
4367 netif_carrier_off(tp->dev);
4368 tg3_link_report(tp);
4369 }
4370
4371 return 0;
4372}
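/* Note on the paired MII_BMSR reads used throughout this function: the
 * BMSR link-status bit is latching-low per IEEE 802.3, meaning a link
 * failure is held until the register is read. The first read clears any
 * stale failure indication and the second reports the current state.
 * A minimal sketch of the idiom, mirroring the reads above:
 *
 *	tg3_readphy(tp, MII_BMSR, &bmsr);	// clear latched link-down
 *	if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
 *	    (bmsr & BMSR_LSTATUS))
 *		;	// link is currently up
 */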
4373
4374struct tg3_fiber_aneginfo {
4375 int state;
4376#define ANEG_STATE_UNKNOWN 0
4377#define ANEG_STATE_AN_ENABLE 1
4378#define ANEG_STATE_RESTART_INIT 2
4379#define ANEG_STATE_RESTART 3
4380#define ANEG_STATE_DISABLE_LINK_OK 4
4381#define ANEG_STATE_ABILITY_DETECT_INIT 5
4382#define ANEG_STATE_ABILITY_DETECT 6
4383#define ANEG_STATE_ACK_DETECT_INIT 7
4384#define ANEG_STATE_ACK_DETECT 8
4385#define ANEG_STATE_COMPLETE_ACK_INIT 9
4386#define ANEG_STATE_COMPLETE_ACK 10
4387#define ANEG_STATE_IDLE_DETECT_INIT 11
4388#define ANEG_STATE_IDLE_DETECT 12
4389#define ANEG_STATE_LINK_OK 13
4390#define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
4391#define ANEG_STATE_NEXT_PAGE_WAIT 15
4392
4393 u32 flags;
4394#define MR_AN_ENABLE 0x00000001
4395#define MR_RESTART_AN 0x00000002
4396#define MR_AN_COMPLETE 0x00000004
4397#define MR_PAGE_RX 0x00000008
4398#define MR_NP_LOADED 0x00000010
4399#define MR_TOGGLE_TX 0x00000020
4400#define MR_LP_ADV_FULL_DUPLEX 0x00000040
4401#define MR_LP_ADV_HALF_DUPLEX 0x00000080
4402#define MR_LP_ADV_SYM_PAUSE 0x00000100
4403#define MR_LP_ADV_ASYM_PAUSE 0x00000200
4404#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4405#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4406#define MR_LP_ADV_NEXT_PAGE 0x00001000
4407#define MR_TOGGLE_RX 0x00002000
4408#define MR_NP_RX 0x00004000
4409
4410#define MR_LINK_OK 0x80000000
4411
4412 unsigned long link_time, cur_time;
4413
4414 u32 ability_match_cfg;
4415 int ability_match_count;
4416
4417 char ability_match, idle_match, ack_match;
4418
4419 u32 txconfig, rxconfig;
4420#define ANEG_CFG_NP 0x00000080
4421#define ANEG_CFG_ACK 0x00000040
4422#define ANEG_CFG_RF2 0x00000020
4423#define ANEG_CFG_RF1 0x00000010
4424#define ANEG_CFG_PS2 0x00000001
4425#define ANEG_CFG_PS1 0x00008000
4426#define ANEG_CFG_HD 0x00004000
4427#define ANEG_CFG_FD 0x00002000
4428#define ANEG_CFG_INVAL 0x00001f06
4429
4430};
4431#define ANEG_OK 0
4432#define ANEG_DONE 1
4433#define ANEG_TIMER_ENAB 2
4434#define ANEG_FAILED -1
4435
4436#define ANEG_STATE_SETTLE_TIME 10000
4437
4438static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4439 struct tg3_fiber_aneginfo *ap)
4440{
5be73b47 4441 u16 flowctrl;
1da177e4
LT
4442 unsigned long delta;
4443 u32 rx_cfg_reg;
4444 int ret;
4445
4446 if (ap->state == ANEG_STATE_UNKNOWN) {
4447 ap->rxconfig = 0;
4448 ap->link_time = 0;
4449 ap->cur_time = 0;
4450 ap->ability_match_cfg = 0;
4451 ap->ability_match_count = 0;
4452 ap->ability_match = 0;
4453 ap->idle_match = 0;
4454 ap->ack_match = 0;
4455 }
4456 ap->cur_time++;
4457
4458 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4459 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4460
4461 if (rx_cfg_reg != ap->ability_match_cfg) {
4462 ap->ability_match_cfg = rx_cfg_reg;
4463 ap->ability_match = 0;
4464 ap->ability_match_count = 0;
4465 } else {
4466 if (++ap->ability_match_count > 1) {
4467 ap->ability_match = 1;
4468 ap->ability_match_cfg = rx_cfg_reg;
4469 }
4470 }
4471 if (rx_cfg_reg & ANEG_CFG_ACK)
4472 ap->ack_match = 1;
4473 else
4474 ap->ack_match = 0;
4475
4476 ap->idle_match = 0;
4477 } else {
4478 ap->idle_match = 1;
4479 ap->ability_match_cfg = 0;
4480 ap->ability_match_count = 0;
4481 ap->ability_match = 0;
4482 ap->ack_match = 0;
4483
4484 rx_cfg_reg = 0;
4485 }
4486
4487 ap->rxconfig = rx_cfg_reg;
4488 ret = ANEG_OK;
4489
33f401ae 4490 switch (ap->state) {
1da177e4
LT
4491 case ANEG_STATE_UNKNOWN:
4492 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4493 ap->state = ANEG_STATE_AN_ENABLE;
4494
4495 /* fallthru */
4496 case ANEG_STATE_AN_ENABLE:
4497 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4498 if (ap->flags & MR_AN_ENABLE) {
4499 ap->link_time = 0;
4500 ap->cur_time = 0;
4501 ap->ability_match_cfg = 0;
4502 ap->ability_match_count = 0;
4503 ap->ability_match = 0;
4504 ap->idle_match = 0;
4505 ap->ack_match = 0;
4506
4507 ap->state = ANEG_STATE_RESTART_INIT;
4508 } else {
4509 ap->state = ANEG_STATE_DISABLE_LINK_OK;
4510 }
4511 break;
4512
4513 case ANEG_STATE_RESTART_INIT:
4514 ap->link_time = ap->cur_time;
4515 ap->flags &= ~(MR_NP_LOADED);
4516 ap->txconfig = 0;
4517 tw32(MAC_TX_AUTO_NEG, 0);
4518 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4519 tw32_f(MAC_MODE, tp->mac_mode);
4520 udelay(40);
4521
4522 ret = ANEG_TIMER_ENAB;
4523 ap->state = ANEG_STATE_RESTART;
4524
4525 /* fallthru */
4526 case ANEG_STATE_RESTART:
4527 delta = ap->cur_time - ap->link_time;
859a5887 4528 if (delta > ANEG_STATE_SETTLE_TIME)
1da177e4 4529 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
859a5887 4530 else
1da177e4 4531 ret = ANEG_TIMER_ENAB;
1da177e4
LT
4532 break;
4533
4534 case ANEG_STATE_DISABLE_LINK_OK:
4535 ret = ANEG_DONE;
4536 break;
4537
4538 case ANEG_STATE_ABILITY_DETECT_INIT:
4539 ap->flags &= ~(MR_TOGGLE_TX);
5be73b47
MC
4540 ap->txconfig = ANEG_CFG_FD;
4541 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4542 if (flowctrl & ADVERTISE_1000XPAUSE)
4543 ap->txconfig |= ANEG_CFG_PS1;
4544 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4545 ap->txconfig |= ANEG_CFG_PS2;
1da177e4
LT
4546 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4547 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4548 tw32_f(MAC_MODE, tp->mac_mode);
4549 udelay(40);
4550
4551 ap->state = ANEG_STATE_ABILITY_DETECT;
4552 break;
4553
4554 case ANEG_STATE_ABILITY_DETECT:
859a5887 4555 if (ap->ability_match != 0 && ap->rxconfig != 0)
1da177e4 4556 ap->state = ANEG_STATE_ACK_DETECT_INIT;
1da177e4
LT
4557 break;
4558
4559 case ANEG_STATE_ACK_DETECT_INIT:
4560 ap->txconfig |= ANEG_CFG_ACK;
4561 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4562 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4563 tw32_f(MAC_MODE, tp->mac_mode);
4564 udelay(40);
4565
4566 ap->state = ANEG_STATE_ACK_DETECT;
4567
4568 /* fallthru */
4569 case ANEG_STATE_ACK_DETECT:
4570 if (ap->ack_match != 0) {
4571 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4572 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4573 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4574 } else {
4575 ap->state = ANEG_STATE_AN_ENABLE;
4576 }
4577 } else if (ap->ability_match != 0 &&
4578 ap->rxconfig == 0) {
4579 ap->state = ANEG_STATE_AN_ENABLE;
4580 }
4581 break;
4582
4583 case ANEG_STATE_COMPLETE_ACK_INIT:
4584 if (ap->rxconfig & ANEG_CFG_INVAL) {
4585 ret = ANEG_FAILED;
4586 break;
4587 }
4588 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4589 MR_LP_ADV_HALF_DUPLEX |
4590 MR_LP_ADV_SYM_PAUSE |
4591 MR_LP_ADV_ASYM_PAUSE |
4592 MR_LP_ADV_REMOTE_FAULT1 |
4593 MR_LP_ADV_REMOTE_FAULT2 |
4594 MR_LP_ADV_NEXT_PAGE |
4595 MR_TOGGLE_RX |
4596 MR_NP_RX);
4597 if (ap->rxconfig & ANEG_CFG_FD)
4598 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4599 if (ap->rxconfig & ANEG_CFG_HD)
4600 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4601 if (ap->rxconfig & ANEG_CFG_PS1)
4602 ap->flags |= MR_LP_ADV_SYM_PAUSE;
4603 if (ap->rxconfig & ANEG_CFG_PS2)
4604 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4605 if (ap->rxconfig & ANEG_CFG_RF1)
4606 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4607 if (ap->rxconfig & ANEG_CFG_RF2)
4608 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4609 if (ap->rxconfig & ANEG_CFG_NP)
4610 ap->flags |= MR_LP_ADV_NEXT_PAGE;
4611
4612 ap->link_time = ap->cur_time;
4613
4614 ap->flags ^= (MR_TOGGLE_TX);
4615 if (ap->rxconfig & 0x0008)
4616 ap->flags |= MR_TOGGLE_RX;
4617 if (ap->rxconfig & ANEG_CFG_NP)
4618 ap->flags |= MR_NP_RX;
4619 ap->flags |= MR_PAGE_RX;
4620
4621 ap->state = ANEG_STATE_COMPLETE_ACK;
4622 ret = ANEG_TIMER_ENAB;
4623 break;
4624
4625 case ANEG_STATE_COMPLETE_ACK:
4626 if (ap->ability_match != 0 &&
4627 ap->rxconfig == 0) {
4628 ap->state = ANEG_STATE_AN_ENABLE;
4629 break;
4630 }
4631 delta = ap->cur_time - ap->link_time;
4632 if (delta > ANEG_STATE_SETTLE_TIME) {
4633 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4634 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4635 } else {
4636 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4637 !(ap->flags & MR_NP_RX)) {
4638 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4639 } else {
4640 ret = ANEG_FAILED;
4641 }
4642 }
4643 }
4644 break;
4645
4646 case ANEG_STATE_IDLE_DETECT_INIT:
4647 ap->link_time = ap->cur_time;
4648 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4649 tw32_f(MAC_MODE, tp->mac_mode);
4650 udelay(40);
4651
4652 ap->state = ANEG_STATE_IDLE_DETECT;
4653 ret = ANEG_TIMER_ENAB;
4654 break;
4655
4656 case ANEG_STATE_IDLE_DETECT:
4657 if (ap->ability_match != 0 &&
4658 ap->rxconfig == 0) {
4659 ap->state = ANEG_STATE_AN_ENABLE;
4660 break;
4661 }
4662 delta = ap->cur_time - ap->link_time;
4663 if (delta > ANEG_STATE_SETTLE_TIME) {
4664 /* XXX another gem from the Broadcom driver :( */
4665 ap->state = ANEG_STATE_LINK_OK;
4666 }
4667 break;
4668
4669 case ANEG_STATE_LINK_OK:
4670 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4671 ret = ANEG_DONE;
4672 break;
4673
4674 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4675 /* ??? unimplemented */
4676 break;
4677
4678 case ANEG_STATE_NEXT_PAGE_WAIT:
4679 /* ??? unimplemented */
4680 break;
4681
4682 default:
4683 ret = ANEG_FAILED;
4684 break;
855e1111 4685 }
1da177e4
LT
4686
4687 return ret;
4688}
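/* Summary of the state machine above; the successful negotiation path
 * is:
 *
 *	AN_ENABLE -> RESTART_INIT -> RESTART -> ABILITY_DETECT_INIT ->
 *	ABILITY_DETECT -> ACK_DETECT_INIT -> ACK_DETECT ->
 *	COMPLETE_ACK_INIT -> COMPLETE_ACK -> IDLE_DETECT_INIT ->
 *	IDLE_DETECT -> LINK_OK
 *
 * States that need to wait return ANEG_TIMER_ENAB so the caller keeps
 * ticking cur_time, and an ability mismatch at any detect stage drops
 * back to AN_ENABLE to restart the negotiation.
 */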
4689
5be73b47 4690static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
1da177e4
LT
4691{
4692 int res = 0;
4693 struct tg3_fiber_aneginfo aninfo;
4694 int status = ANEG_FAILED;
4695 unsigned int tick;
4696 u32 tmp;
4697
4698 tw32_f(MAC_TX_AUTO_NEG, 0);
4699
4700 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4701 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4702 udelay(40);
4703
4704 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4705 udelay(40);
4706
4707 memset(&aninfo, 0, sizeof(aninfo));
4708 aninfo.flags |= MR_AN_ENABLE;
4709 aninfo.state = ANEG_STATE_UNKNOWN;
4710 aninfo.cur_time = 0;
4711 tick = 0;
4712 while (++tick < 195000) {
4713 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4714 if (status == ANEG_DONE || status == ANEG_FAILED)
4715 break;
4716
4717 udelay(1);
4718 }
4719
4720 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4721 tw32_f(MAC_MODE, tp->mac_mode);
4722 udelay(40);
4723
5be73b47
MC
4724 *txflags = aninfo.txconfig;
4725 *rxflags = aninfo.flags;
1da177e4
LT
4726
4727 if (status == ANEG_DONE &&
4728 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4729 MR_LP_ADV_FULL_DUPLEX)))
4730 res = 1;
4731
4732 return res;
4733}
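/* Timing note: the polling loop above bounds hardware-assisted
 * autonegotiation at 195000 ticks with udelay(1) per tick, roughly
 * 195 ms of busy waiting (plus state-machine overhead). Each settle
 * interval inside the state machine is ANEG_STATE_SETTLE_TIME (10000)
 * ticks, so the budget covers many restarts before fiber_autoneg()
 * gives up.
 */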
4734
4735static void tg3_init_bcm8002(struct tg3 *tp)
4736{
4737 u32 mac_status = tr32(MAC_STATUS);
4738 int i;
4739
4740 /* Reset when initializing the first time or when we have a link. */
63c3a66f 4741 if (tg3_flag(tp, INIT_COMPLETE) &&
1da177e4
LT
4742 !(mac_status & MAC_STATUS_PCS_SYNCED))
4743 return;
4744
4745 /* Set PLL lock range. */
4746 tg3_writephy(tp, 0x16, 0x8007);
4747
4748 /* SW reset */
4749 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4750
4751 /* Wait for reset to complete. */
4752 /* XXX schedule_timeout() ... */
4753 for (i = 0; i < 500; i++)
4754 udelay(10);
4755
4756 /* Config mode; select PMA/Ch 1 regs. */
4757 tg3_writephy(tp, 0x10, 0x8411);
4758
4759 /* Enable auto-lock and comdet, select txclk for tx. */
4760 tg3_writephy(tp, 0x11, 0x0a10);
4761
4762 tg3_writephy(tp, 0x18, 0x00a0);
4763 tg3_writephy(tp, 0x16, 0x41ff);
4764
4765 /* Assert and deassert POR. */
4766 tg3_writephy(tp, 0x13, 0x0400);
4767 udelay(40);
4768 tg3_writephy(tp, 0x13, 0x0000);
4769
4770 tg3_writephy(tp, 0x11, 0x0a50);
4771 udelay(40);
4772 tg3_writephy(tp, 0x11, 0x0a10);
4773
4774 /* Wait for signal to stabilize */
4775 /* XXX schedule_timeout() ... */
4776 for (i = 0; i < 15000; i++)
4777 udelay(10);
4778
4779 /* Deselect the channel register so we can read the PHYID
4780 * later.
4781 */
4782 tg3_writephy(tp, 0x10, 0x8011);
4783}
4784
4785static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4786{
82cd3d11 4787 u16 flowctrl;
1da177e4
LT
4788 u32 sg_dig_ctrl, sg_dig_status;
4789 u32 serdes_cfg, expected_sg_dig_ctrl;
4790 int workaround, port_a;
4791 int current_link_up;
4792
4793 serdes_cfg = 0;
4794 expected_sg_dig_ctrl = 0;
4795 workaround = 0;
4796 port_a = 1;
4797 current_link_up = 0;
4798
4799 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4800 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4801 workaround = 1;
4802 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4803 port_a = 0;
4804
4805 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4806 /* preserve bits 20-23 for voltage regulator */
4807 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4808 }
4809
4810 sg_dig_ctrl = tr32(SG_DIG_CTRL);
4811
4812 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
c98f6e3b 4813 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
1da177e4
LT
4814 if (workaround) {
4815 u32 val = serdes_cfg;
4816
4817 if (port_a)
4818 val |= 0xc010000;
4819 else
4820 val |= 0x4010000;
4821 tw32_f(MAC_SERDES_CFG, val);
4822 }
c98f6e3b
MC
4823
4824 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
1da177e4
LT
4825 }
4826 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4827 tg3_setup_flow_control(tp, 0, 0);
4828 current_link_up = 1;
4829 }
4830 goto out;
4831 }
4832
4833 /* Want auto-negotiation. */
c98f6e3b 4834 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
1da177e4 4835
82cd3d11
MC
4836 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4837 if (flowctrl & ADVERTISE_1000XPAUSE)
4838 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4839 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4840 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
1da177e4
LT
4841
4842 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
f07e9af3 4843 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3d3ebe74
MC
4844 tp->serdes_counter &&
4845 ((mac_status & (MAC_STATUS_PCS_SYNCED |
4846 MAC_STATUS_RCVD_CFG)) ==
4847 MAC_STATUS_PCS_SYNCED)) {
4848 tp->serdes_counter--;
4849 current_link_up = 1;
4850 goto out;
4851 }
4852restart_autoneg:
1da177e4
LT
4853 if (workaround)
4854 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
c98f6e3b 4855 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
1da177e4
LT
4856 udelay(5);
4857 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4858
3d3ebe74 4859 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
f07e9af3 4860 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
1da177e4
LT
4861 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4862 MAC_STATUS_SIGNAL_DET)) {
3d3ebe74 4863 sg_dig_status = tr32(SG_DIG_STATUS);
1da177e4
LT
4864 mac_status = tr32(MAC_STATUS);
4865
c98f6e3b 4866 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
1da177e4 4867 (mac_status & MAC_STATUS_PCS_SYNCED)) {
82cd3d11
MC
4868 u32 local_adv = 0, remote_adv = 0;
4869
4870 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4871 local_adv |= ADVERTISE_1000XPAUSE;
4872 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4873 local_adv |= ADVERTISE_1000XPSE_ASYM;
1da177e4 4874
c98f6e3b 4875 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
82cd3d11 4876 remote_adv |= LPA_1000XPAUSE;
c98f6e3b 4877 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
82cd3d11 4878 remote_adv |= LPA_1000XPAUSE_ASYM;
1da177e4 4879
859edb26
MC
4880 tp->link_config.rmt_adv =
4881 mii_adv_to_ethtool_adv_x(remote_adv);
4882
1da177e4
LT
4883 tg3_setup_flow_control(tp, local_adv, remote_adv);
4884 current_link_up = 1;
3d3ebe74 4885 tp->serdes_counter = 0;
f07e9af3 4886 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
c98f6e3b 4887 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3d3ebe74
MC
4888 if (tp->serdes_counter)
4889 tp->serdes_counter--;
1da177e4
LT
4890 else {
4891 if (workaround) {
4892 u32 val = serdes_cfg;
4893
4894 if (port_a)
4895 val |= 0xc010000;
4896 else
4897 val |= 0x4010000;
4898
4899 tw32_f(MAC_SERDES_CFG, val);
4900 }
4901
c98f6e3b 4902 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
1da177e4
LT
4903 udelay(40);
4904
 4905 /* Link parallel detection: the link is up
 4906  * only if we have PCS_SYNC and we are not
 4907  * receiving config code words. */
4908 mac_status = tr32(MAC_STATUS);
4909 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4910 !(mac_status & MAC_STATUS_RCVD_CFG)) {
4911 tg3_setup_flow_control(tp, 0, 0);
4912 current_link_up = 1;
f07e9af3
MC
4913 tp->phy_flags |=
4914 TG3_PHYFLG_PARALLEL_DETECT;
3d3ebe74
MC
4915 tp->serdes_counter =
4916 SERDES_PARALLEL_DET_TIMEOUT;
4917 } else
4918 goto restart_autoneg;
1da177e4
LT
4919 }
4920 }
3d3ebe74
MC
4921 } else {
4922 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
f07e9af3 4923 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
1da177e4
LT
4924 }
4925
4926out:
4927 return current_link_up;
4928}
4929
4930static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4931{
4932 int current_link_up = 0;
4933
5cf64b8a 4934 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
1da177e4 4935 goto out;
1da177e4
LT
4936
4937 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5be73b47 4938 u32 txflags, rxflags;
1da177e4 4939 int i;
6aa20a22 4940
5be73b47
MC
4941 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4942 u32 local_adv = 0, remote_adv = 0;
1da177e4 4943
5be73b47
MC
4944 if (txflags & ANEG_CFG_PS1)
4945 local_adv |= ADVERTISE_1000XPAUSE;
4946 if (txflags & ANEG_CFG_PS2)
4947 local_adv |= ADVERTISE_1000XPSE_ASYM;
4948
4949 if (rxflags & MR_LP_ADV_SYM_PAUSE)
4950 remote_adv |= LPA_1000XPAUSE;
4951 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4952 remote_adv |= LPA_1000XPAUSE_ASYM;
1da177e4 4953
859edb26
MC
4954 tp->link_config.rmt_adv =
4955 mii_adv_to_ethtool_adv_x(remote_adv);
4956
1da177e4
LT
4957 tg3_setup_flow_control(tp, local_adv, remote_adv);
4958
1da177e4
LT
4959 current_link_up = 1;
4960 }
4961 for (i = 0; i < 30; i++) {
4962 udelay(20);
4963 tw32_f(MAC_STATUS,
4964 (MAC_STATUS_SYNC_CHANGED |
4965 MAC_STATUS_CFG_CHANGED));
4966 udelay(40);
4967 if ((tr32(MAC_STATUS) &
4968 (MAC_STATUS_SYNC_CHANGED |
4969 MAC_STATUS_CFG_CHANGED)) == 0)
4970 break;
4971 }
4972
4973 mac_status = tr32(MAC_STATUS);
4974 if (current_link_up == 0 &&
4975 (mac_status & MAC_STATUS_PCS_SYNCED) &&
4976 !(mac_status & MAC_STATUS_RCVD_CFG))
4977 current_link_up = 1;
4978 } else {
5be73b47
MC
4979 tg3_setup_flow_control(tp, 0, 0);
4980
1da177e4
LT
4981 /* Forcing 1000FD link up. */
4982 current_link_up = 1;
1da177e4
LT
4983
4984 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4985 udelay(40);
e8f3f6ca
MC
4986
4987 tw32_f(MAC_MODE, tp->mac_mode);
4988 udelay(40);
1da177e4
LT
4989 }
4990
4991out:
4992 return current_link_up;
4993}
4994
4995static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4996{
4997 u32 orig_pause_cfg;
4998 u16 orig_active_speed;
4999 u8 orig_active_duplex;
5000 u32 mac_status;
5001 int current_link_up;
5002 int i;
5003
8d018621 5004 orig_pause_cfg = tp->link_config.active_flowctrl;
1da177e4
LT
5005 orig_active_speed = tp->link_config.active_speed;
5006 orig_active_duplex = tp->link_config.active_duplex;
5007
63c3a66f 5008 if (!tg3_flag(tp, HW_AUTONEG) &&
1da177e4 5009 netif_carrier_ok(tp->dev) &&
63c3a66f 5010 tg3_flag(tp, INIT_COMPLETE)) {
1da177e4
LT
5011 mac_status = tr32(MAC_STATUS);
5012 mac_status &= (MAC_STATUS_PCS_SYNCED |
5013 MAC_STATUS_SIGNAL_DET |
5014 MAC_STATUS_CFG_CHANGED |
5015 MAC_STATUS_RCVD_CFG);
5016 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5017 MAC_STATUS_SIGNAL_DET)) {
5018 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5019 MAC_STATUS_CFG_CHANGED));
5020 return 0;
5021 }
5022 }
5023
5024 tw32_f(MAC_TX_AUTO_NEG, 0);
5025
5026 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5027 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5028 tw32_f(MAC_MODE, tp->mac_mode);
5029 udelay(40);
5030
79eb6904 5031 if (tp->phy_id == TG3_PHY_ID_BCM8002)
1da177e4
LT
5032 tg3_init_bcm8002(tp);
5033
5034 /* Enable link change event even when serdes polling. */
5035 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5036 udelay(40);
5037
5038 current_link_up = 0;
859edb26 5039 tp->link_config.rmt_adv = 0;
1da177e4
LT
5040 mac_status = tr32(MAC_STATUS);
5041
63c3a66f 5042 if (tg3_flag(tp, HW_AUTONEG))
1da177e4
LT
5043 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5044 else
5045 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5046
898a56f8 5047 tp->napi[0].hw_status->status =
1da177e4 5048 (SD_STATUS_UPDATED |
898a56f8 5049 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
1da177e4
LT
5050
5051 for (i = 0; i < 100; i++) {
5052 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5053 MAC_STATUS_CFG_CHANGED));
5054 udelay(5);
5055 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3d3ebe74
MC
5056 MAC_STATUS_CFG_CHANGED |
5057 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
1da177e4
LT
5058 break;
5059 }
5060
5061 mac_status = tr32(MAC_STATUS);
5062 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5063 current_link_up = 0;
3d3ebe74
MC
5064 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5065 tp->serdes_counter == 0) {
1da177e4
LT
5066 tw32_f(MAC_MODE, (tp->mac_mode |
5067 MAC_MODE_SEND_CONFIGS));
5068 udelay(1);
5069 tw32_f(MAC_MODE, tp->mac_mode);
5070 }
5071 }
5072
5073 if (current_link_up == 1) {
5074 tp->link_config.active_speed = SPEED_1000;
5075 tp->link_config.active_duplex = DUPLEX_FULL;
5076 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5077 LED_CTRL_LNKLED_OVERRIDE |
5078 LED_CTRL_1000MBPS_ON));
5079 } else {
5080 tp->link_config.active_speed = SPEED_INVALID;
5081 tp->link_config.active_duplex = DUPLEX_INVALID;
5082 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5083 LED_CTRL_LNKLED_OVERRIDE |
5084 LED_CTRL_TRAFFIC_OVERRIDE));
5085 }
5086
5087 if (current_link_up != netif_carrier_ok(tp->dev)) {
5088 if (current_link_up)
5089 netif_carrier_on(tp->dev);
5090 else
5091 netif_carrier_off(tp->dev);
5092 tg3_link_report(tp);
5093 } else {
8d018621 5094 u32 now_pause_cfg = tp->link_config.active_flowctrl;
1da177e4
LT
5095 if (orig_pause_cfg != now_pause_cfg ||
5096 orig_active_speed != tp->link_config.active_speed ||
5097 orig_active_duplex != tp->link_config.active_duplex)
5098 tg3_link_report(tp);
5099 }
5100
5101 return 0;
5102}
5103
747e8f8b
MC
5104static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5105{
5106 int current_link_up, err = 0;
5107 u32 bmsr, bmcr;
5108 u16 current_speed;
5109 u8 current_duplex;
ef167e27 5110 u32 local_adv, remote_adv;
747e8f8b
MC
5111
5112 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5113 tw32_f(MAC_MODE, tp->mac_mode);
5114 udelay(40);
5115
5116 tw32(MAC_EVENT, 0);
5117
5118 tw32_f(MAC_STATUS,
5119 (MAC_STATUS_SYNC_CHANGED |
5120 MAC_STATUS_CFG_CHANGED |
5121 MAC_STATUS_MI_COMPLETION |
5122 MAC_STATUS_LNKSTATE_CHANGED));
5123 udelay(40);
5124
5125 if (force_reset)
5126 tg3_phy_reset(tp);
5127
5128 current_link_up = 0;
5129 current_speed = SPEED_INVALID;
5130 current_duplex = DUPLEX_INVALID;
859edb26 5131 tp->link_config.rmt_adv = 0;
747e8f8b
MC
5132
5133 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5134 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
d4d2c558
MC
5135 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
5136 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5137 bmsr |= BMSR_LSTATUS;
5138 else
5139 bmsr &= ~BMSR_LSTATUS;
5140 }
747e8f8b
MC
5141
5142 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5143
5144 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
f07e9af3 5145 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
747e8f8b
MC
5146 /* do nothing, just check for link up at the end */
5147 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
28011cf1 5148 u32 adv, newadv;
747e8f8b
MC
5149
5150 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
28011cf1
MC
5151 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5152 ADVERTISE_1000XPAUSE |
5153 ADVERTISE_1000XPSE_ASYM |
5154 ADVERTISE_SLCT);
747e8f8b 5155
28011cf1 5156 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
37f07023 5157 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
747e8f8b 5158
28011cf1
MC
5159 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5160 tg3_writephy(tp, MII_ADVERTISE, newadv);
747e8f8b
MC
5161 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5162 tg3_writephy(tp, MII_BMCR, bmcr);
5163
5164 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3d3ebe74 5165 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
f07e9af3 5166 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
747e8f8b
MC
5167
5168 return err;
5169 }
5170 } else {
5171 u32 new_bmcr;
5172
5173 bmcr &= ~BMCR_SPEED1000;
5174 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5175
5176 if (tp->link_config.duplex == DUPLEX_FULL)
5177 new_bmcr |= BMCR_FULLDPLX;
5178
5179 if (new_bmcr != bmcr) {
5180 /* BMCR_SPEED1000 is a reserved bit that needs
5181 * to be set on write.
5182 */
5183 new_bmcr |= BMCR_SPEED1000;
5184
5185 /* Force a linkdown */
5186 if (netif_carrier_ok(tp->dev)) {
5187 u32 adv;
5188
5189 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5190 adv &= ~(ADVERTISE_1000XFULL |
5191 ADVERTISE_1000XHALF |
5192 ADVERTISE_SLCT);
5193 tg3_writephy(tp, MII_ADVERTISE, adv);
5194 tg3_writephy(tp, MII_BMCR, bmcr |
5195 BMCR_ANRESTART |
5196 BMCR_ANENABLE);
5197 udelay(10);
5198 netif_carrier_off(tp->dev);
5199 }
5200 tg3_writephy(tp, MII_BMCR, new_bmcr);
5201 bmcr = new_bmcr;
5202 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5203 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
d4d2c558
MC
5204 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5205 ASIC_REV_5714) {
5206 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5207 bmsr |= BMSR_LSTATUS;
5208 else
5209 bmsr &= ~BMSR_LSTATUS;
5210 }
f07e9af3 5211 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
747e8f8b
MC
5212 }
5213 }
5214
5215 if (bmsr & BMSR_LSTATUS) {
5216 current_speed = SPEED_1000;
5217 current_link_up = 1;
5218 if (bmcr & BMCR_FULLDPLX)
5219 current_duplex = DUPLEX_FULL;
5220 else
5221 current_duplex = DUPLEX_HALF;
5222
ef167e27
MC
5223 local_adv = 0;
5224 remote_adv = 0;
5225
747e8f8b 5226 if (bmcr & BMCR_ANENABLE) {
ef167e27 5227 u32 common;
747e8f8b
MC
5228
5229 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5230 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5231 common = local_adv & remote_adv;
5232 if (common & (ADVERTISE_1000XHALF |
5233 ADVERTISE_1000XFULL)) {
5234 if (common & ADVERTISE_1000XFULL)
5235 current_duplex = DUPLEX_FULL;
5236 else
5237 current_duplex = DUPLEX_HALF;
859edb26
MC
5238
5239 tp->link_config.rmt_adv =
5240 mii_adv_to_ethtool_adv_x(remote_adv);
63c3a66f 5241 } else if (!tg3_flag(tp, 5780_CLASS)) {
57d8b880 5242 /* Link is up via parallel detect */
859a5887 5243 } else {
747e8f8b 5244 current_link_up = 0;
859a5887 5245 }
747e8f8b
MC
5246 }
5247 }
5248
ef167e27
MC
5249 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5250 tg3_setup_flow_control(tp, local_adv, remote_adv);
5251
747e8f8b
MC
5252 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5253 if (tp->link_config.active_duplex == DUPLEX_HALF)
5254 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5255
5256 tw32_f(MAC_MODE, tp->mac_mode);
5257 udelay(40);
5258
5259 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5260
5261 tp->link_config.active_speed = current_speed;
5262 tp->link_config.active_duplex = current_duplex;
5263
5264 if (current_link_up != netif_carrier_ok(tp->dev)) {
5265 if (current_link_up)
5266 netif_carrier_on(tp->dev);
5267 else {
5268 netif_carrier_off(tp->dev);
f07e9af3 5269 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
747e8f8b
MC
5270 }
5271 tg3_link_report(tp);
5272 }
5273 return err;
5274}
5275
5276static void tg3_serdes_parallel_detect(struct tg3 *tp)
5277{
3d3ebe74 5278 if (tp->serdes_counter) {
747e8f8b 5279 /* Give autoneg time to complete. */
3d3ebe74 5280 tp->serdes_counter--;
747e8f8b
MC
5281 return;
5282 }
c6cdf436 5283
747e8f8b
MC
5284 if (!netif_carrier_ok(tp->dev) &&
5285 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5286 u32 bmcr;
5287
5288 tg3_readphy(tp, MII_BMCR, &bmcr);
5289 if (bmcr & BMCR_ANENABLE) {
5290 u32 phy1, phy2;
5291
5292 /* Select shadow register 0x1f */
f08aa1a8
MC
5293 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5294 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
747e8f8b
MC
5295
5296 /* Select expansion interrupt status register */
f08aa1a8
MC
5297 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5298 MII_TG3_DSP_EXP1_INT_STAT);
5299 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5300 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
747e8f8b
MC
5301
5302 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5303 /* We have signal detect and not receiving
5304 * config code words, link is up by parallel
5305 * detection.
5306 */
5307
5308 bmcr &= ~BMCR_ANENABLE;
5309 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5310 tg3_writephy(tp, MII_BMCR, bmcr);
f07e9af3 5311 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
747e8f8b
MC
5312 }
5313 }
859a5887
MC
5314 } else if (netif_carrier_ok(tp->dev) &&
5315 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
f07e9af3 5316 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
747e8f8b
MC
5317 u32 phy2;
5318
5319 /* Select expansion interrupt status register */
f08aa1a8
MC
5320 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5321 MII_TG3_DSP_EXP1_INT_STAT);
5322 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
747e8f8b
MC
5323 if (phy2 & 0x20) {
5324 u32 bmcr;
5325
5326 /* Config code words received, turn on autoneg. */
5327 tg3_readphy(tp, MII_BMCR, &bmcr);
5328 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5329
f07e9af3 5330 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
747e8f8b
MC
5331
5332 }
5333 }
5334}
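/* Note: parallel detection, as implemented above, is the fallback for
 * link partners that do not autonegotiate. If the PHY reports signal
 * detect (shadow bit 0x10) without incoming config code words
 * (expansion bit 0x20), the link is forced up at 1000/full with
 * autoneg disabled; if config code words appear later, BMCR_ANENABLE
 * is turned back on and normal negotiation resumes.
 */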
5335
1da177e4
LT
5336static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5337{
f2096f94 5338 u32 val;
1da177e4
LT
5339 int err;
5340
f07e9af3 5341 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
1da177e4 5342 err = tg3_setup_fiber_phy(tp, force_reset);
f07e9af3 5343 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
747e8f8b 5344 err = tg3_setup_fiber_mii_phy(tp, force_reset);
859a5887 5345 else
1da177e4 5346 err = tg3_setup_copper_phy(tp, force_reset);
1da177e4 5347
bcb37f6c 5348 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
f2096f94 5349 u32 scale;
aa6c91fe
MC
5350
5351 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5352 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5353 scale = 65;
5354 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5355 scale = 6;
5356 else
5357 scale = 12;
5358
5359 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5360 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5361 tw32(GRC_MISC_CFG, val);
5362 }
5363
f2096f94
MC
5364 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5365 (6 << TX_LENGTHS_IPG_SHIFT);
5366 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5367 val |= tr32(MAC_TX_LENGTHS) &
5368 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5369 TX_LENGTHS_CNT_DWN_VAL_MSK);
5370
1da177e4
LT
5371 if (tp->link_config.active_speed == SPEED_1000 &&
5372 tp->link_config.active_duplex == DUPLEX_HALF)
f2096f94
MC
5373 tw32(MAC_TX_LENGTHS, val |
5374 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
1da177e4 5375 else
f2096f94
MC
5376 tw32(MAC_TX_LENGTHS, val |
5377 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
	if (!tg3_flag(tp, 5705_PLUS)) {
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}

static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}

static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
{
	int i;

	dst = (u32 *)((u8 *)dst + off);
	for (i = 0; i < len; i += sizeof(u32))
		*dst++ = tr32(off + i);
}

static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}

static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs) {
		netdev_err(tp->dev, "Failed allocating register dump buffer\n");
		return;
	}

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			   "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
			   "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}

/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
}

static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();
	return tnapi->tx_pending -
	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
}

/* Tigon3 never reports partial packet sends. So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
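
/* Sketch of the race the smp_mb() in tg3_tx() closes (an assumed
 * interleaving for illustration, not driver code):
 *
 *	tg3_tx()			tg3_start_xmit()
 *	tnapi->tx_cons = sw_idx;	netif_tx_stop_queue(txq);
 *	smp_mb();			smp_mb();
 *	if (queue stopped &&		if (tg3_tx_avail() > thresh)
 *	    avail > thresh)			wake queue;
 *		wake queue;
 *
 * If either barrier were missing, each side could read the other's
 * stale state and leave the queue stopped with no completion left
 * to wake it.
 */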

static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
{
	if (!ri->data)
		return;

	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
			 map_sz, PCI_DMA_FROMDEVICE);
	kfree(ri->data);
	ri->data = NULL;
}

/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			     u32 opaque_key, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc(skb_size, GFP_ATOMIC);
	if (!data)
		return -ENOMEM;

	mapping = pci_map_single(tp->pdev,
				 data + TG3_RX_OFFSET(tp),
				 data_size,
				 PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping)) {
		kfree(data);
		return -EIO;
	}

	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return data_size;
}

/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_data for full details.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->data = NULL;
}

/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * fields, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
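
/* Illustrative walk of the MAXLEN selection described above (ring
 * sizes assumed for the example, not read from hardware): if the
 * standard ring's TG3_BDINFO advertises MAXLEN 1536 and the jumbo
 * ring's advertises MAXLEN 9018, then a 60-byte ARP frame and a
 * 1514-byte full-size frame both select standard ring buffers, while
 * a 9000-byte jumbo frame falls through to the jumbo ring.
 */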
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;
		u8 *data;

		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		prefetch(data + TG3_RX_OFFSET(tp));
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;

			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
						     *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			skb = build_skb(data);
			if (!skb) {
				kfree(data);
				goto drop_it_no_recycle;
			}
			skb_reserve(skb, TG3_RX_OFFSET(tp));
			/* Ensure that the update to the data happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->data = NULL;

		} else {
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			skb = netdev_alloc_skb(tp->dev,
					       len + TG3_RAW_IP_ALIGN);
			if (skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(skb->data,
			       data + TG3_RX_OFFSET(tp),
			       len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
		}

		skb_put(skb, len);
		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb,
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		if (tnapi != &tp->napi[1])
			napi_schedule(&tp->napi[1].napi);
	}

	return received;
}

static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				tw32_f(MAC_STATUS,
				       (MAC_STATUS_SYNC_CHANGED |
					MAC_STATUS_CFG_CHANGED |
					MAC_STATUS_MI_COMPLETION |
					MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}
}

static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
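
/* Worked example of the wrap handling above (illustrative values):
 * with rx_std_ring_mask == 511, cons_idx == 508 and prod_idx == 4,
 * the producer has wrapped, so the first loop iteration copies
 * 512 - 508 == 4 entries (508..511), the consumer index wraps to 0,
 * and the next iteration copies the remaining 4 entries (0..3).
 */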

static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		for (i = 1; i < tp->irq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}

static inline void tg3_reset_task_schedule(struct tg3 *tp)
{
	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
		schedule_work(&tp->reset_task);
}

static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
}

static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tnapi->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}

static void tg3_process_error(struct tg3 *tp)
{
	u32 val;
	bool real_error = false;

	if (tg3_flag(tp, ERROR_PROCESSED))
		return;

	/* Check Flow Attention register */
	val = tr32(HOSTCC_FLOW_ATTN);
	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
		netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
		real_error = true;
	}

	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
		netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
		real_error = true;
	}

	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
		netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
		real_error = true;
	}

	if (!real_error)
		return;

	tg3_dump_state(tp);

	tg3_flag_set(tp, ERROR_PROCESSED);
	tg3_reset_task_schedule(tp);
}

static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tnapi->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}

static void tg3_napi_disable(struct tg3 *tp)
{
	int i;

	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
}

static void tg3_napi_enable(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
}

static void tg3_napi_init(struct tg3 *tp)
{
	int i;

	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
	for (i = 1; i < tp->irq_cnt; i++)
		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
}

static void tg3_napi_fini(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		netif_napi_del(&tp->napi[i].napi);
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}

static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}

/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}

static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
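
#if 0	/* Usage sketch only, not driver code: the expected pairing of
	 * tg3_full_lock()/tg3_full_unlock().  Passing irq_sync == 1 is
	 * reserved for paths that reconfigure or shut down the chip and
	 * must not race the interrupt handlers.
	 */
static void example_reconfigure(struct tg3 *tp)
{
	tg3_full_lock(tp, 1);	/* take tp->lock and quiesce all IRQs */
	/* ... touch state that the ISRs also examine ... */
	tg3_full_unlock(tp);	/* drop tp->lock, re-enabling BHs */
}
#endif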

/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}

/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox.  PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}

static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}

static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}

/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif

static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	tg3_reset_task_schedule(tp);
}

/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
	u32 base = (u32) mapping & 0xffffffff;

	return (base > 0xffffdcc0) && (base + len + 8 < base);
}
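
/* Worked example (illustrative addresses): a 100-byte buffer mapped
 * at 0xffffffd0 extends past 0x100000000, so base + len + 8 wraps the
 * u32 below base and the test above returns true.  The 0xffffdcc0
 * guard merely skips buffers too low to reach a boundary even with
 * the 8-byte slop.
 */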

/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	return 0;
#endif
}

static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
				 dma_addr_t mapping, u32 len, u32 flags,
				 u32 mss, u32 vlan)
{
	txbd->addr_hi = ((u64) mapping >> 32);
	txbd->addr_lo = ((u64) mapping & 0xffffffff);
	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
}
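
/* Example of the resulting descriptor packing (assumed values, for
 * illustration only): a 1514-byte frame at DMA address 0x123456000
 * with TXD_FLAG_END set, no TSO and VLAN tag 5 produces
 *	addr_hi   = 0x00000001
 *	addr_lo   = 0x23456000
 *	len_flags = (1514 << TXD_LEN_SHIFT) | TXD_FLAG_END
 *	vlan_tag  = (0 << TXD_MSS_SHIFT) | (5 << TXD_VLAN_TAG_SHIFT)
 */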

static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tp->dma_limit) {
		u32 prvidx = *entry;
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8-byte DMA problem */
			if (len <= 8) {
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}

static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}

/* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb(new_skb);
				ret = -1;
			}
		}
	}

	dev_kfree_skb(skb);
	*pskb = new_skb;
	return ret;
}

static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);

/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
		netif_stop_queue(tp->dev);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
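
/* Illustrative sizing of the check above (assumed numbers): an skb
 * with gso_segs == 46 gives frag_cnt_est == 138, so the queue is
 * stopped unless at least that many tx descriptors are free -- a
 * worst-case budget of roughly three descriptors per resulting
 * segment.
 */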

/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	u32 budget;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	budget = tg3_tx_avail(tnapi);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		struct iphdr *iph;
		u32 tcp_opt_len, hdr_len;

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;

		if (!skb_is_gso_v6(skb)) {
			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
		    tg3_flag(tp, TSO_BUG))
			return tg3_tso_bug(tp, skb);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	if (vlan_tx_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = vlan_tx_tag_get(skb);
	}

	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;

	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			    ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		/* Now loop through additional data
		 * fragments, and queue them.
		 */
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);
	netdev_sent_queue(tp->dev, skb->len);

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	mmiowb();
	return NETDEV_TX_OK;

dma_error:
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}

static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
	if (enable) {
		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
				  MAC_MODE_PORT_MODE_MASK);

		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

		if (!tg3_flag(tp, 5705_PLUS))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else {
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

		if (tg3_flag(tp, 5705_PLUS) ||
		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	tw32(MAC_MODE, tp->mac_mode);
	udelay(40);
}

static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, 0);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}

static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (features & NETIF_F_LOOPBACK) {
		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, true);
		netif_carrier_on(tp->dev);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
	} else {
		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, false);
		/* Force link status check */
		tg3_setup_phy(tp, 1);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
	}
}
7176
c8f44aff
MM
7177static netdev_features_t tg3_fix_features(struct net_device *dev,
7178 netdev_features_t features)
dc668910
MM
7179{
7180 struct tg3 *tp = netdev_priv(dev);
7181
63c3a66f 7182 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
dc668910
MM
7183 features &= ~NETIF_F_ALL_TSO;
7184
7185 return features;
7186}
7187
c8f44aff 7188static int tg3_set_features(struct net_device *dev, netdev_features_t features)
06c03c02 7189{
c8f44aff 7190 netdev_features_t changed = dev->features ^ features;
06c03c02
MB
7191
7192 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7193 tg3_set_loopback(dev, features);
7194
7195 return 0;
7196}
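/* Example only (hypothetical helper, unused by the driver): the XOR in
 * tg3_set_features() leaves exactly the feature bits that differ, so a
 * single AND tells whether a given feature was toggled.
 */
static inline bool tg3_loopback_toggled_example(netdev_features_t old,
						netdev_features_t wanted)
{
	netdev_features_t changed = old ^ wanted;	/* differing bits */

	return (changed & NETIF_F_LOOPBACK) != 0;
}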
7197
21f581a5
MC
7198static void tg3_rx_prodring_free(struct tg3 *tp,
7199 struct tg3_rx_prodring_set *tpr)
1da177e4 7200{
1da177e4
LT
7201 int i;
7202
8fea32b9 7203 if (tpr != &tp->napi[0].prodring) {
b196c7e4 7204 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
2c49a44d 7205 i = (i + 1) & tp->rx_std_ring_mask)
9205fd9c 7206 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
b196c7e4
MC
7207 tp->rx_pkt_map_sz);
7208
63c3a66f 7209 if (tg3_flag(tp, JUMBO_CAPABLE)) {
b196c7e4
MC
7210 for (i = tpr->rx_jmb_cons_idx;
7211 i != tpr->rx_jmb_prod_idx;
2c49a44d 7212 i = (i + 1) & tp->rx_jmb_ring_mask) {
9205fd9c 7213 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
b196c7e4
MC
7214 TG3_RX_JMB_MAP_SZ);
7215 }
7216 }
7217
2b2cdb65 7218 return;
b196c7e4 7219 }
1da177e4 7220
2c49a44d 7221 for (i = 0; i <= tp->rx_std_ring_mask; i++)
9205fd9c 7222 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
2b2cdb65 7223 tp->rx_pkt_map_sz);
1da177e4 7224
63c3a66f 7225 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
2c49a44d 7226 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
9205fd9c 7227 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
2b2cdb65 7228 TG3_RX_JMB_MAP_SZ);
1da177e4
LT
7229 }
7230}
7231
c6cdf436 7232/* Initialize rx rings for packet processing.
1da177e4
LT
7233 *
7234 * The chip has been shut down and the driver detached from
7235 * the network stack, so no interrupts or new tx packets will
7236 * end up in the driver. tp->{tx,}lock are held and thus
7237 * we may not sleep.
7238 */
21f581a5
MC
7239static int tg3_rx_prodring_alloc(struct tg3 *tp,
7240 struct tg3_rx_prodring_set *tpr)
1da177e4 7241{
287be12e 7242 u32 i, rx_pkt_dma_sz;
1da177e4 7243
b196c7e4
MC
7244 tpr->rx_std_cons_idx = 0;
7245 tpr->rx_std_prod_idx = 0;
7246 tpr->rx_jmb_cons_idx = 0;
7247 tpr->rx_jmb_prod_idx = 0;
7248
8fea32b9 7249 if (tpr != &tp->napi[0].prodring) {
2c49a44d
MC
7250 memset(&tpr->rx_std_buffers[0], 0,
7251 TG3_RX_STD_BUFF_RING_SIZE(tp));
48035728 7252 if (tpr->rx_jmb_buffers)
2b2cdb65 7253 memset(&tpr->rx_jmb_buffers[0], 0,
2c49a44d 7254 TG3_RX_JMB_BUFF_RING_SIZE(tp));
2b2cdb65
MC
7255 goto done;
7256 }
7257
1da177e4 7258 /* Zero out all descriptors. */
2c49a44d 7259 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
1da177e4 7260
287be12e 7261 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
63c3a66f 7262 if (tg3_flag(tp, 5780_CLASS) &&
287be12e
MC
7263 tp->dev->mtu > ETH_DATA_LEN)
7264 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7265 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7e72aad4 7266
1da177e4
LT
7267 /* Initialize invariants of the rings; we only set this
7268 * stuff once. This works because the card does not
7269 * write into the rx buffer posting rings.
7270 */
2c49a44d 7271 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
1da177e4
LT
7272 struct tg3_rx_buffer_desc *rxd;
7273
21f581a5 7274 rxd = &tpr->rx_std[i];
287be12e 7275 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
1da177e4
LT
7276 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7277 rxd->opaque = (RXD_OPAQUE_RING_STD |
7278 (i << RXD_OPAQUE_INDEX_SHIFT));
7279 }
7280
1da177e4
LT
7281 /* Now allocate fresh SKBs for each rx ring. */
7282 for (i = 0; i < tp->rx_pending; i++) {
9205fd9c 7283 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
5129c3a3
MC
7284 netdev_warn(tp->dev,
7285 "Using a smaller RX standard ring. Only "
7286 "%d out of %d buffers were allocated "
7287 "successfully\n", i, tp->rx_pending);
32d8c572 7288 if (i == 0)
cf7a7298 7289 goto initfail;
32d8c572 7290 tp->rx_pending = i;
1da177e4 7291 break;
32d8c572 7292 }
1da177e4
LT
7293 }
7294
63c3a66f 7295 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
cf7a7298
MC
7296 goto done;
7297
2c49a44d 7298 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
cf7a7298 7299
63c3a66f 7300 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
0d86df80 7301 goto done;
cf7a7298 7302
2c49a44d 7303 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
0d86df80
MC
7304 struct tg3_rx_buffer_desc *rxd;
7305
7306 rxd = &tpr->rx_jmb[i].std;
7307 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7308 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7309 RXD_FLAG_JUMBO;
7310 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7311 (i << RXD_OPAQUE_INDEX_SHIFT));
7312 }
7313
7314 for (i = 0; i < tp->rx_jumbo_pending; i++) {
9205fd9c 7315 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
5129c3a3
MC
7316 netdev_warn(tp->dev,
7317 "Using a smaller RX jumbo ring. Only %d "
7318 "out of %d buffers were allocated "
7319 "successfully\n", i, tp->rx_jumbo_pending);
0d86df80
MC
7320 if (i == 0)
7321 goto initfail;
7322 tp->rx_jumbo_pending = i;
7323 break;
1da177e4
LT
7324 }
7325 }
cf7a7298
MC
7326
7327done:
32d8c572 7328 return 0;
cf7a7298
MC
7329
7330initfail:
21f581a5 7331 tg3_rx_prodring_free(tp, tpr);
cf7a7298 7332 return -ENOMEM;
1da177e4
LT
7333}
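/* Sketch for illustration (unused): decoding the opaque cookie stored
 * in each rx descriptor above. RXD_OPAQUE_RING_MASK and
 * RXD_OPAQUE_INDEX_MASK are assumed to come from tg3.h; the completion
 * path splits the cookie the same way to recover which producer ring a
 * buffer belongs to and its slot in that ring.
 */
static inline void tg3_rxd_opaque_decode_example(u32 opaque,
						 u32 *ring, u32 *index)
{
	*ring = opaque & RXD_OPAQUE_RING_MASK;		/* STD or JUMBO */
	*index = (opaque & RXD_OPAQUE_INDEX_MASK) >>
		 RXD_OPAQUE_INDEX_SHIFT;		/* buffer slot */
}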
7334
21f581a5
MC
7335static void tg3_rx_prodring_fini(struct tg3 *tp,
7336 struct tg3_rx_prodring_set *tpr)
1da177e4 7337{
21f581a5
MC
7338 kfree(tpr->rx_std_buffers);
7339 tpr->rx_std_buffers = NULL;
7340 kfree(tpr->rx_jmb_buffers);
7341 tpr->rx_jmb_buffers = NULL;
7342 if (tpr->rx_std) {
4bae65c8
MC
7343 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7344 tpr->rx_std, tpr->rx_std_mapping);
21f581a5 7345 tpr->rx_std = NULL;
1da177e4 7346 }
21f581a5 7347 if (tpr->rx_jmb) {
4bae65c8
MC
7348 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7349 tpr->rx_jmb, tpr->rx_jmb_mapping);
21f581a5 7350 tpr->rx_jmb = NULL;
1da177e4 7351 }
cf7a7298
MC
7352}
7353
21f581a5
MC
7354static int tg3_rx_prodring_init(struct tg3 *tp,
7355 struct tg3_rx_prodring_set *tpr)
cf7a7298 7356{
2c49a44d
MC
7357 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7358 GFP_KERNEL);
21f581a5 7359 if (!tpr->rx_std_buffers)
cf7a7298
MC
7360 return -ENOMEM;
7361
4bae65c8
MC
7362 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7363 TG3_RX_STD_RING_BYTES(tp),
7364 &tpr->rx_std_mapping,
7365 GFP_KERNEL);
21f581a5 7366 if (!tpr->rx_std)
cf7a7298
MC
7367 goto err_out;
7368
63c3a66f 7369 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
2c49a44d 7370 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
21f581a5
MC
7371 GFP_KERNEL);
7372 if (!tpr->rx_jmb_buffers)
cf7a7298
MC
7373 goto err_out;
7374
4bae65c8
MC
7375 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7376 TG3_RX_JMB_RING_BYTES(tp),
7377 &tpr->rx_jmb_mapping,
7378 GFP_KERNEL);
21f581a5 7379 if (!tpr->rx_jmb)
cf7a7298
MC
7380 goto err_out;
7381 }
7382
7383 return 0;
7384
7385err_out:
21f581a5 7386 tg3_rx_prodring_fini(tp, tpr);
cf7a7298
MC
7387 return -ENOMEM;
7388}
7389
7390/* Free up pending packets in all rx/tx rings.
7391 *
7392 * The chip has been shut down and the driver detached from
7393 * the networking, so no interrupts or new tx packets will
7394 * end up in the driver. tp->{tx,}lock is not held and we are not
7395 * in an interrupt context and thus may sleep.
7396 */
7397static void tg3_free_rings(struct tg3 *tp)
7398{
f77a6a8e 7399 int i, j;
cf7a7298 7400
f77a6a8e
MC
7401 for (j = 0; j < tp->irq_cnt; j++) {
7402 struct tg3_napi *tnapi = &tp->napi[j];
cf7a7298 7403
8fea32b9 7404 tg3_rx_prodring_free(tp, &tnapi->prodring);
b28f6428 7405
0c1d0e2b
MC
7406 if (!tnapi->tx_buffers)
7407 continue;
7408
0d681b27
MC
7409 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7410 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
cf7a7298 7411
0d681b27 7412 if (!skb)
f77a6a8e 7413 continue;
cf7a7298 7414
ba1142e4
MC
7415 tg3_tx_skb_unmap(tnapi, i,
7416 skb_shinfo(skb)->nr_frags - 1);
f77a6a8e
MC
7417
7418 dev_kfree_skb_any(skb);
7419 }
2b2cdb65 7420 }
298376d3 7421 netdev_reset_queue(tp->dev);
cf7a7298
MC
7422}
7423
7424/* Initialize tx/rx rings for packet processing.
7425 *
7426 * The chip has been shut down and the driver detached from
7427 * the network stack, so no interrupts or new tx packets will
7428 * end up in the driver. tp->{tx,}lock are held and thus
7429 * we may not sleep.
7430 */
7431static int tg3_init_rings(struct tg3 *tp)
7432{
f77a6a8e 7433 int i;
72334482 7434
cf7a7298
MC
7435 /* Free up all the SKBs. */
7436 tg3_free_rings(tp);
7437
f77a6a8e
MC
7438 for (i = 0; i < tp->irq_cnt; i++) {
7439 struct tg3_napi *tnapi = &tp->napi[i];
7440
7441 tnapi->last_tag = 0;
7442 tnapi->last_irq_tag = 0;
7443 tnapi->hw_status->status = 0;
7444 tnapi->hw_status->status_tag = 0;
7445 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
cf7a7298 7446
f77a6a8e
MC
7447 tnapi->tx_prod = 0;
7448 tnapi->tx_cons = 0;
0c1d0e2b
MC
7449 if (tnapi->tx_ring)
7450 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
f77a6a8e
MC
7451
7452 tnapi->rx_rcb_ptr = 0;
0c1d0e2b
MC
7453 if (tnapi->rx_rcb)
7454 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
2b2cdb65 7455
8fea32b9 7456 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
e4af1af9 7457 tg3_free_rings(tp);
2b2cdb65 7458 return -ENOMEM;
e4af1af9 7459 }
f77a6a8e 7460 }
72334482 7461
2b2cdb65 7462 return 0;
cf7a7298
MC
7463}
7464
7465/*
7466 * Must not be invoked with interrupt sources disabled and
7467 * the hardware shut down.
7468 */
7469static void tg3_free_consistent(struct tg3 *tp)
7470{
f77a6a8e 7471 int i;
898a56f8 7472
f77a6a8e
MC
7473 for (i = 0; i < tp->irq_cnt; i++) {
7474 struct tg3_napi *tnapi = &tp->napi[i];
7475
7476 if (tnapi->tx_ring) {
4bae65c8 7477 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
f77a6a8e
MC
7478 tnapi->tx_ring, tnapi->tx_desc_mapping);
7479 tnapi->tx_ring = NULL;
7480 }
7481
7482 kfree(tnapi->tx_buffers);
7483 tnapi->tx_buffers = NULL;
7484
7485 if (tnapi->rx_rcb) {
4bae65c8
MC
7486 dma_free_coherent(&tp->pdev->dev,
7487 TG3_RX_RCB_RING_BYTES(tp),
7488 tnapi->rx_rcb,
7489 tnapi->rx_rcb_mapping);
f77a6a8e
MC
7490 tnapi->rx_rcb = NULL;
7491 }
7492
8fea32b9
MC
7493 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7494
f77a6a8e 7495 if (tnapi->hw_status) {
4bae65c8
MC
7496 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7497 tnapi->hw_status,
7498 tnapi->status_mapping);
f77a6a8e
MC
7499 tnapi->hw_status = NULL;
7500 }
1da177e4 7501 }
f77a6a8e 7502
1da177e4 7503 if (tp->hw_stats) {
4bae65c8
MC
7504 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7505 tp->hw_stats, tp->stats_mapping);
1da177e4
LT
7506 tp->hw_stats = NULL;
7507 }
7508}
7509
7510/*
7511 * Must not be invoked with interrupt sources disabled and
7512 * the hardware shut down. Can sleep.
7513 */
7514static int tg3_alloc_consistent(struct tg3 *tp)
7515{
f77a6a8e 7516 int i;
898a56f8 7517
4bae65c8
MC
7518 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7519 sizeof(struct tg3_hw_stats),
7520 &tp->stats_mapping,
7521 GFP_KERNEL);
f77a6a8e 7522 if (!tp->hw_stats)
1da177e4
LT
7523 goto err_out;
7524
f77a6a8e 7525 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
1da177e4 7526
f77a6a8e
MC
7527 for (i = 0; i < tp->irq_cnt; i++) {
7528 struct tg3_napi *tnapi = &tp->napi[i];
8d9d7cfc 7529 struct tg3_hw_status *sblk;
1da177e4 7530
4bae65c8
MC
7531 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7532 TG3_HW_STATUS_SIZE,
7533 &tnapi->status_mapping,
7534 GFP_KERNEL);
f77a6a8e
MC
7535 if (!tnapi->hw_status)
7536 goto err_out;
898a56f8 7537
f77a6a8e 7538 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8d9d7cfc
MC
7539 sblk = tnapi->hw_status;
7540
8fea32b9
MC
7541 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7542 goto err_out;
7543
19cfaecc
MC
7544 /* If multivector TSS is enabled, vector 0 does not handle
7545 * tx interrupts. Don't allocate any resources for it.
7546 */
63c3a66f
JP
7547 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
7548 (i && tg3_flag(tp, ENABLE_TSS))) {
df8944cf
MC
7549 tnapi->tx_buffers = kzalloc(
7550 sizeof(struct tg3_tx_ring_info) *
7551 TG3_TX_RING_SIZE, GFP_KERNEL);
19cfaecc
MC
7552 if (!tnapi->tx_buffers)
7553 goto err_out;
7554
4bae65c8
MC
7555 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7556 TG3_TX_RING_BYTES,
7557 &tnapi->tx_desc_mapping,
7558 GFP_KERNEL);
19cfaecc
MC
7559 if (!tnapi->tx_ring)
7560 goto err_out;
7561 }
7562
8d9d7cfc
MC
7563 /*
7564 * When RSS is enabled, the status block format changes
7565 * slightly. The "rx_jumbo_consumer", "reserved",
7566 * and "rx_mini_consumer" members get mapped to the
7567 * other three rx return ring producer indexes.
7568 */
7569 switch (i) {
7570 default:
7571 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7572 break;
7573 case 2:
7574 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7575 break;
7576 case 3:
7577 tnapi->rx_rcb_prod_idx = &sblk->reserved;
7578 break;
7579 case 4:
7580 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7581 break;
7582 }
72334482 7583
0c1d0e2b
MC
7584 /*
7585 * If multivector RSS is enabled, vector 0 does not handle
7586 * rx or tx interrupts. Don't allocate any resources for it.
7587 */
63c3a66f 7588 if (!i && tg3_flag(tp, ENABLE_RSS))
0c1d0e2b
MC
7589 continue;
7590
4bae65c8
MC
7591 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7592 TG3_RX_RCB_RING_BYTES(tp),
7593 &tnapi->rx_rcb_mapping,
7594 GFP_KERNEL);
f77a6a8e
MC
7595 if (!tnapi->rx_rcb)
7596 goto err_out;
72334482 7597
f77a6a8e 7598 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
f77a6a8e 7599 }
1da177e4
LT
7600
7601 return 0;
7602
7603err_out:
7604 tg3_free_consistent(tp);
7605 return -ENOMEM;
7606}
7607
7608#define MAX_WAIT_CNT 1000
7609
7610/* To stop a block, clear the enable bit and poll till it
7611 * clears. tp->lock is held.
7612 */
b3b7d6be 7613static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
1da177e4
LT
7614{
7615 unsigned int i;
7616 u32 val;
7617
63c3a66f 7618 if (tg3_flag(tp, 5705_PLUS)) {
1da177e4
LT
7619 switch (ofs) {
7620 case RCVLSC_MODE:
7621 case DMAC_MODE:
7622 case MBFREE_MODE:
7623 case BUFMGR_MODE:
7624 case MEMARB_MODE:
7625 /* We can't enable/disable these bits of the
7626 * 5705/5750, just say success.
7627 */
7628 return 0;
7629
7630 default:
7631 break;
855e1111 7632 }
1da177e4
LT
7633 }
7634
7635 val = tr32(ofs);
7636 val &= ~enable_bit;
7637 tw32_f(ofs, val);
7638
7639 for (i = 0; i < MAX_WAIT_CNT; i++) {
7640 udelay(100);
7641 val = tr32(ofs);
7642 if ((val & enable_bit) == 0)
7643 break;
7644 }
7645
b3b7d6be 7646 if (i == MAX_WAIT_CNT && !silent) {
2445e461
MC
7647 dev_err(&tp->pdev->dev,
7648 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7649 ofs, enable_bit);
1da177e4
LT
7650 return -ENODEV;
7651 }
7652
7653 return 0;
7654}
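/* Worked timing note (commentary only): the poll in tg3_stop_block()
 * retries MAX_WAIT_CNT (1000) times with udelay(100) between reads, so
 * a block that never clears its enable bit costs at most roughly
 * 1000 * 100us = 100ms of busy-waiting before -ENODEV is returned.
 */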
7655
7656/* tp->lock is held. */
b3b7d6be 7657static int tg3_abort_hw(struct tg3 *tp, int silent)
1da177e4
LT
7658{
7659 int i, err;
7660
7661 tg3_disable_ints(tp);
7662
7663 tp->rx_mode &= ~RX_MODE_ENABLE;
7664 tw32_f(MAC_RX_MODE, tp->rx_mode);
7665 udelay(10);
7666
b3b7d6be
DM
7667 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7668 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7669 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7670 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7671 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7672 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7673
7674 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7675 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7676 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7677 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7678 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7679 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7680 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
1da177e4
LT
7681
7682 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7683 tw32_f(MAC_MODE, tp->mac_mode);
7684 udelay(40);
7685
7686 tp->tx_mode &= ~TX_MODE_ENABLE;
7687 tw32_f(MAC_TX_MODE, tp->tx_mode);
7688
7689 for (i = 0; i < MAX_WAIT_CNT; i++) {
7690 udelay(100);
7691 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7692 break;
7693 }
7694 if (i >= MAX_WAIT_CNT) {
ab96b241
MC
7695 dev_err(&tp->pdev->dev,
7696 "%s timed out, TX_MODE_ENABLE will not clear "
7697 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
e6de8ad1 7698 err |= -ENODEV;
1da177e4
LT
7699 }
7700
e6de8ad1 7701 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
b3b7d6be
DM
7702 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7703 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
1da177e4
LT
7704
7705 tw32(FTQ_RESET, 0xffffffff);
7706 tw32(FTQ_RESET, 0x00000000);
7707
b3b7d6be
DM
7708 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7709 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
1da177e4 7710
f77a6a8e
MC
7711 for (i = 0; i < tp->irq_cnt; i++) {
7712 struct tg3_napi *tnapi = &tp->napi[i];
7713 if (tnapi->hw_status)
7714 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7715 }
1da177e4 7716
1da177e4
LT
7717 return err;
7718}
7719
ee6a99b5
MC
7720/* Save PCI command register before chip reset */
7721static void tg3_save_pci_state(struct tg3 *tp)
7722{
8a6eac90 7723 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
ee6a99b5
MC
7724}
7725
7726/* Restore PCI state after chip reset */
7727static void tg3_restore_pci_state(struct tg3 *tp)
7728{
7729 u32 val;
7730
7731 /* Re-enable indirect register accesses. */
7732 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7733 tp->misc_host_ctrl);
7734
7735 /* Set MAX PCI retry to zero. */
7736 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7737 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
63c3a66f 7738 tg3_flag(tp, PCIX_MODE))
ee6a99b5 7739 val |= PCISTATE_RETRY_SAME_DMA;
0d3031d9 7740 /* Allow reads and writes to the APE register and memory space. */
63c3a66f 7741 if (tg3_flag(tp, ENABLE_APE))
0d3031d9 7742 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
f92d9dc1
MC
7743 PCISTATE_ALLOW_APE_SHMEM_WR |
7744 PCISTATE_ALLOW_APE_PSPACE_WR;
ee6a99b5
MC
7745 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7746
8a6eac90 7747 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
ee6a99b5 7748
2c55a3d0
MC
7749 if (!tg3_flag(tp, PCI_EXPRESS)) {
7750 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7751 tp->pci_cacheline_sz);
7752 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7753 tp->pci_lat_timer);
114342f2 7754 }
5f5c51e3 7755
ee6a99b5 7756 /* Make sure PCI-X relaxed ordering bit is clear. */
63c3a66f 7757 if (tg3_flag(tp, PCIX_MODE)) {
9974a356
MC
7758 u16 pcix_cmd;
7759
7760 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7761 &pcix_cmd);
7762 pcix_cmd &= ~PCI_X_CMD_ERO;
7763 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7764 pcix_cmd);
7765 }
ee6a99b5 7766
63c3a66f 7767 if (tg3_flag(tp, 5780_CLASS)) {
ee6a99b5
MC
7768
7769 /* Chip reset on 5780 will reset MSI enable bit,
7770 * so need to restore it.
7771 */
63c3a66f 7772 if (tg3_flag(tp, USING_MSI)) {
ee6a99b5
MC
7773 u16 ctrl;
7774
7775 pci_read_config_word(tp->pdev,
7776 tp->msi_cap + PCI_MSI_FLAGS,
7777 &ctrl);
7778 pci_write_config_word(tp->pdev,
7779 tp->msi_cap + PCI_MSI_FLAGS,
7780 ctrl | PCI_MSI_FLAGS_ENABLE);
7781 val = tr32(MSGINT_MODE);
7782 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7783 }
7784 }
7785}
7786
1da177e4
LT
7787/* tp->lock is held. */
7788static int tg3_chip_reset(struct tg3 *tp)
7789{
7790 u32 val;
1ee582d8 7791 void (*write_op)(struct tg3 *, u32, u32);
4f125f42 7792 int i, err;
1da177e4 7793
f49639e6
DM
7794 tg3_nvram_lock(tp);
7795
77b483f1
MC
7796 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7797
f49639e6
DM
7798 /* No matching tg3_nvram_unlock() after this because
7799 * chip reset below will undo the nvram lock.
7800 */
7801 tp->nvram_lock_cnt = 0;
1da177e4 7802
ee6a99b5
MC
7803 /* GRC_MISC_CFG core clock reset will clear the memory
7804 * enable bit in PCI register 4 and the MSI enable bit
7805 * on some chips, so we save relevant registers here.
7806 */
7807 tg3_save_pci_state(tp);
7808
d9ab5ad1 7809 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
63c3a66f 7810 tg3_flag(tp, 5755_PLUS))
d9ab5ad1
MC
7811 tw32(GRC_FASTBOOT_PC, 0);
7812
1da177e4
LT
7813 /*
7814 * We must avoid the readl() that normally takes place.
7815 * It locks machines, causes machine checks, and other
7816 * fun things. So, temporarily disable the 5701
7817 * hardware workaround, while we do the reset.
7818 */
1ee582d8
MC
7819 write_op = tp->write32;
7820 if (write_op == tg3_write_flush_reg32)
7821 tp->write32 = tg3_write32;
1da177e4 7822
d18edcb2
MC
7823 /* Prevent the irq handler from reading or writing PCI registers
7824 * during chip reset when the memory enable bit in the PCI command
7825 * register may be cleared. The chip does not generate interrupt
7826 * at this time, but the irq handler may still be called due to irq
7827 * sharing or irqpoll.
7828 */
63c3a66f 7829 tg3_flag_set(tp, CHIP_RESETTING);
f77a6a8e
MC
7830 for (i = 0; i < tp->irq_cnt; i++) {
7831 struct tg3_napi *tnapi = &tp->napi[i];
7832 if (tnapi->hw_status) {
7833 tnapi->hw_status->status = 0;
7834 tnapi->hw_status->status_tag = 0;
7835 }
7836 tnapi->last_tag = 0;
7837 tnapi->last_irq_tag = 0;
b8fa2f3a 7838 }
d18edcb2 7839 smp_mb();
4f125f42
MC
7840
7841 for (i = 0; i < tp->irq_cnt; i++)
7842 synchronize_irq(tp->napi[i].irq_vec);
d18edcb2 7843
255ca311
MC
7844 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7845 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7846 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7847 }
7848
1da177e4
LT
7849 /* do the reset */
7850 val = GRC_MISC_CFG_CORECLK_RESET;
7851
63c3a66f 7852 if (tg3_flag(tp, PCI_EXPRESS)) {
88075d91
MC
7853 /* Force PCIe 1.0a mode */
7854 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
63c3a66f 7855 !tg3_flag(tp, 57765_PLUS) &&
88075d91
MC
7856 tr32(TG3_PCIE_PHY_TSTCTL) ==
7857 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7858 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7859
1da177e4
LT
7860 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7861 tw32(GRC_MISC_CFG, (1 << 29));
7862 val |= (1 << 29);
7863 }
7864 }
7865
b5d3772c
MC
7866 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7867 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7868 tw32(GRC_VCPU_EXT_CTRL,
7869 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7870 }
7871
f37500d3 7872 /* Manage gphy power for all CPMU absent PCIe devices. */
63c3a66f 7873 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
1da177e4 7874 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
f37500d3 7875
1da177e4
LT
7876 tw32(GRC_MISC_CFG, val);
7877
1ee582d8
MC
7878 /* restore 5701 hardware bug workaround write method */
7879 tp->write32 = write_op;
1da177e4
LT
7880
7881 /* Unfortunately, we have to delay before the PCI read back.
7882 * Some 575X chips will not even respond to a PCI cfg access
7883 * when the reset command is given to the chip.
7884 *
7885 * How do these hardware designers expect things to work
7886 * properly if the PCI write is posted for a long period
7887 * of time? It is always necessary to have some method by
7888 * which a register read back can occur to push out the
7889 * write that does the reset.
7890 *
7891 * For most tg3 variants the trick below was working.
7892 * Ho hum...
7893 */
7894 udelay(120);
7895
7896 /* Flush PCI posted writes. The normal MMIO registers
7897 * are inaccessible at this time so this is the only
7898 * way to do this reliably (actually, this is no longer
7899 * the case, see above). I tried to use indirect
7900 * register read/write but this upset some 5701 variants.
7901 */
7902 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7903
7904 udelay(120);
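	/* Added commentary: the config space read above is the classic
	 * "flush a posted write" idiom -- a read on the same bus path
	 * forces the earlier posted write to complete, roughly:
	 *
	 *	tw32(GRC_MISC_CFG, val);		posted
	 *	pci_read_config_dword(tp->pdev, ...);	pushes it out
	 *
	 * An MMIO readback cannot be used here because the core clock
	 * reset leaves the register window inaccessible.
	 */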
7905
708ebb3a 7906 if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
e7126997
MC
7907 u16 val16;
7908
1da177e4
LT
7909 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7910 int i;
7911 u32 cfg_val;
7912
7913 /* Wait for link training to complete. */
7914 for (i = 0; i < 5000; i++)
7915 udelay(100);
7916
7917 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7918 pci_write_config_dword(tp->pdev, 0xc4,
7919 cfg_val | (1 << 15));
7920 }
5e7dfd0f 7921
e7126997
MC
7922 /* Clear the "no snoop" and "relaxed ordering" bits. */
7923 pci_read_config_word(tp->pdev,
708ebb3a 7924 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
e7126997
MC
7925 &val16);
7926 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7927 PCI_EXP_DEVCTL_NOSNOOP_EN);
7928 /*
7929 * Older PCIe devices only support the 128 byte
7930 * MPS setting. Enforce the restriction.
5e7dfd0f 7931 */
63c3a66f 7932 if (!tg3_flag(tp, CPMU_PRESENT))
e7126997 7933 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
5e7dfd0f 7934 pci_write_config_word(tp->pdev,
708ebb3a 7935 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
e7126997 7936 val16);
5e7dfd0f 7937
5e7dfd0f
MC
7938 /* Clear error status */
7939 pci_write_config_word(tp->pdev,
708ebb3a 7940 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
5e7dfd0f
MC
7941 PCI_EXP_DEVSTA_CED |
7942 PCI_EXP_DEVSTA_NFED |
7943 PCI_EXP_DEVSTA_FED |
7944 PCI_EXP_DEVSTA_URD);
1da177e4
LT
7945 }
7946
ee6a99b5 7947 tg3_restore_pci_state(tp);
1da177e4 7948
63c3a66f
JP
7949 tg3_flag_clear(tp, CHIP_RESETTING);
7950 tg3_flag_clear(tp, ERROR_PROCESSED);
d18edcb2 7951
ee6a99b5 7952 val = 0;
63c3a66f 7953 if (tg3_flag(tp, 5780_CLASS))
4cf78e4f 7954 val = tr32(MEMARB_MODE);
ee6a99b5 7955 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
1da177e4
LT
7956
7957 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7958 tg3_stop_fw(tp);
7959 tw32(0x5000, 0x400);
7960 }
7961
7962 tw32(GRC_MODE, tp->grc_mode);
7963
7964 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
ab0049b4 7965 val = tr32(0xc4);
1da177e4
LT
7966
7967 tw32(0xc4, val | (1 << 15));
7968 }
7969
7970 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7971 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7972 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7973 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7974 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7975 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7976 }
7977
f07e9af3 7978 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9e975cc2 7979 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
d2394e6b 7980 val = tp->mac_mode;
f07e9af3 7981 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9e975cc2 7982 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
d2394e6b 7983 val = tp->mac_mode;
1da177e4 7984 } else
d2394e6b
MC
7985 val = 0;
7986
7987 tw32_f(MAC_MODE, val);
1da177e4
LT
7988 udelay(40);
7989
77b483f1
MC
7990 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7991
7a6f4369
MC
7992 err = tg3_poll_fw(tp);
7993 if (err)
7994 return err;
1da177e4 7995
0a9140cf
MC
7996 tg3_mdio_start(tp);
7997
63c3a66f 7998 if (tg3_flag(tp, PCI_EXPRESS) &&
f6eb9b1f
MC
7999 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8000 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
63c3a66f 8001 !tg3_flag(tp, 57765_PLUS)) {
ab0049b4 8002 val = tr32(0x7c00);
1da177e4
LT
8003
8004 tw32(0x7c00, val | (1 << 25));
8005 }
8006
d78b59f5
MC
8007 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8008 val = tr32(TG3_CPMU_CLCK_ORIDE);
8009 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8010 }
8011
1da177e4 8012 /* Reprobe ASF enable state. */
63c3a66f
JP
8013 tg3_flag_clear(tp, ENABLE_ASF);
8014 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
1da177e4
LT
8015 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8016 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8017 u32 nic_cfg;
8018
8019 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8020 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
63c3a66f 8021 tg3_flag_set(tp, ENABLE_ASF);
4ba526ce 8022 tp->last_event_jiffies = jiffies;
63c3a66f
JP
8023 if (tg3_flag(tp, 5750_PLUS))
8024 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
1da177e4
LT
8025 }
8026 }
8027
8028 return 0;
8029}
8030
92feeabf
MC
8031static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
8032 struct rtnl_link_stats64 *);
8033static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *,
8034 struct tg3_ethtool_stats *);
8035
1da177e4 8036/* tp->lock is held. */
944d980e 8037static int tg3_halt(struct tg3 *tp, int kind, int silent)
1da177e4
LT
8038{
8039 int err;
8040
8041 tg3_stop_fw(tp);
8042
944d980e 8043 tg3_write_sig_pre_reset(tp, kind);
1da177e4 8044
b3b7d6be 8045 tg3_abort_hw(tp, silent);
1da177e4
LT
8046 err = tg3_chip_reset(tp);
8047
daba2a63
MC
8048 __tg3_set_mac_addr(tp, 0);
8049
944d980e
MC
8050 tg3_write_sig_legacy(tp, kind);
8051 tg3_write_sig_post_reset(tp, kind);
1da177e4 8052
92feeabf
MC
8053 if (tp->hw_stats) {
8054 /* Save the stats across chip resets... */
8055 tg3_get_stats64(tp->dev, &tp->net_stats_prev);
8056 tg3_get_estats(tp, &tp->estats_prev);
8057
8058 /* And make sure the next sample is new data */
8059 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8060 }
8061
1da177e4
LT
8062 if (err)
8063 return err;
8064
8065 return 0;
8066}
8067
1da177e4
LT
8068static int tg3_set_mac_addr(struct net_device *dev, void *p)
8069{
8070 struct tg3 *tp = netdev_priv(dev);
8071 struct sockaddr *addr = p;
986e0aeb 8072 int err = 0, skip_mac_1 = 0;
1da177e4 8073
f9804ddb
MC
8074 if (!is_valid_ether_addr(addr->sa_data))
8075 return -EINVAL;
8076
1da177e4
LT
8077 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8078
e75f7c90
MC
8079 if (!netif_running(dev))
8080 return 0;
8081
63c3a66f 8082 if (tg3_flag(tp, ENABLE_ASF)) {
986e0aeb 8083 u32 addr0_high, addr0_low, addr1_high, addr1_low;
58712ef9 8084
986e0aeb
MC
8085 addr0_high = tr32(MAC_ADDR_0_HIGH);
8086 addr0_low = tr32(MAC_ADDR_0_LOW);
8087 addr1_high = tr32(MAC_ADDR_1_HIGH);
8088 addr1_low = tr32(MAC_ADDR_1_LOW);
8089
8090 /* Skip MAC addr 1 if ASF is using it. */
8091 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8092 !(addr1_high == 0 && addr1_low == 0))
8093 skip_mac_1 = 1;
58712ef9 8094 }
986e0aeb
MC
8095 spin_lock_bh(&tp->lock);
8096 __tg3_set_mac_addr(tp, skip_mac_1);
8097 spin_unlock_bh(&tp->lock);
1da177e4 8098
b9ec6c1b 8099 return err;
1da177e4
LT
8100}
8101
8102/* tp->lock is held. */
8103static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8104 dma_addr_t mapping, u32 maxlen_flags,
8105 u32 nic_addr)
8106{
8107 tg3_write_mem(tp,
8108 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8109 ((u64) mapping >> 32));
8110 tg3_write_mem(tp,
8111 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8112 ((u64) mapping & 0xffffffff));
8113 tg3_write_mem(tp,
8114 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8115 maxlen_flags);
8116
63c3a66f 8117 if (!tg3_flag(tp, 5705_PLUS))
1da177e4
LT
8118 tg3_write_mem(tp,
8119 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8120 nic_addr);
8121}
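/* Example only (hypothetical value, helper unused): how tg3_set_bdinfo()
 * splits a 64-bit DMA address into the two 32-bit BDINFO words. For
 * mapping = 0x0000000123456780ULL the high word is 0x1 and the low
 * word is 0x23456780.
 */
static inline void tg3_bdinfo_addr_split_example(dma_addr_t mapping,
						 u32 *hi, u32 *lo)
{
	*hi = (u64) mapping >> 32;		/* TG3_64BIT_REG_HIGH */
	*lo = (u64) mapping & 0xffffffff;	/* TG3_64BIT_REG_LOW */
}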
8122
d244c892 8123static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
15f9850d 8124{
b6080e12
MC
8125 int i;
8126
63c3a66f 8127 if (!tg3_flag(tp, ENABLE_TSS)) {
b6080e12
MC
8128 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8129 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8130 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
b6080e12
MC
8131 } else {
8132 tw32(HOSTCC_TXCOL_TICKS, 0);
8133 tw32(HOSTCC_TXMAX_FRAMES, 0);
8134 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
19cfaecc 8135 }
b6080e12 8136
63c3a66f 8137 if (!tg3_flag(tp, ENABLE_RSS)) {
19cfaecc
MC
8138 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8139 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8140 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8141 } else {
b6080e12
MC
8142 tw32(HOSTCC_RXCOL_TICKS, 0);
8143 tw32(HOSTCC_RXMAX_FRAMES, 0);
8144 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
15f9850d 8145 }
b6080e12 8146
63c3a66f 8147 if (!tg3_flag(tp, 5705_PLUS)) {
15f9850d
DM
8148 u32 val = ec->stats_block_coalesce_usecs;
8149
b6080e12
MC
8150 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8151 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8152
15f9850d
DM
8153 if (!netif_carrier_ok(tp->dev))
8154 val = 0;
8155
8156 tw32(HOSTCC_STAT_COAL_TICKS, val);
8157 }
b6080e12
MC
8158
8159 for (i = 0; i < tp->irq_cnt - 1; i++) {
8160 u32 reg;
8161
8162 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8163 tw32(reg, ec->rx_coalesce_usecs);
b6080e12
MC
8164 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8165 tw32(reg, ec->rx_max_coalesced_frames);
b6080e12
MC
8166 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8167 tw32(reg, ec->rx_max_coalesced_frames_irq);
19cfaecc 8168
63c3a66f 8169 if (tg3_flag(tp, ENABLE_TSS)) {
19cfaecc
MC
8170 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8171 tw32(reg, ec->tx_coalesce_usecs);
8172 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8173 tw32(reg, ec->tx_max_coalesced_frames);
8174 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8175 tw32(reg, ec->tx_max_coalesced_frames_irq);
8176 }
b6080e12
MC
8177 }
8178
8179 for (; i < tp->irq_max - 1; i++) {
8180 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
b6080e12 8181 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
b6080e12 8182 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
19cfaecc 8183
63c3a66f 8184 if (tg3_flag(tp, ENABLE_TSS)) {
19cfaecc
MC
8185 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8186 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8187 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8188 }
b6080e12 8189 }
15f9850d 8190}
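/* Layout note with the arithmetic spelled out (commentary only): each
 * additional MSI-X vector owns six 32-bit coalescing registers -- rx
 * ticks/max-frames/maxf-int plus the tx trio -- which is where the
 * 6 * 4 = 0x18 byte stride above comes from, e.g.
 * HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18 addresses vector i + 1.
 */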
1da177e4 8191
2d31ecaf
MC
8192/* tp->lock is held. */
8193static void tg3_rings_reset(struct tg3 *tp)
8194{
8195 int i;
f77a6a8e 8196 u32 stblk, txrcb, rxrcb, limit;
2d31ecaf
MC
8197 struct tg3_napi *tnapi = &tp->napi[0];
8198
8199 /* Disable all transmit rings but the first. */
63c3a66f 8200 if (!tg3_flag(tp, 5705_PLUS))
2d31ecaf 8201 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
63c3a66f 8202 else if (tg3_flag(tp, 5717_PLUS))
3d37728b 8203 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
55086ad9 8204 else if (tg3_flag(tp, 57765_CLASS))
b703df6f 8205 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
2d31ecaf
MC
8206 else
8207 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8208
8209 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8210 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8211 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8212 BDINFO_FLAGS_DISABLED);
8213
8214
8215 /* Disable all receive return rings but the first. */
63c3a66f 8216 if (tg3_flag(tp, 5717_PLUS))
f6eb9b1f 8217 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
63c3a66f 8218 else if (!tg3_flag(tp, 5705_PLUS))
2d31ecaf 8219 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
b703df6f 8220 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
55086ad9 8221 tg3_flag(tp, 57765_CLASS))
2d31ecaf
MC
8222 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8223 else
8224 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8225
8226 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8227 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8228 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8229 BDINFO_FLAGS_DISABLED);
8230
8231 /* Disable interrupts */
8232 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
0e6cf6a9
MC
8233 tp->napi[0].chk_msi_cnt = 0;
8234 tp->napi[0].last_rx_cons = 0;
8235 tp->napi[0].last_tx_cons = 0;
2d31ecaf
MC
8236
8237 /* Zero mailbox registers. */
63c3a66f 8238 if (tg3_flag(tp, SUPPORT_MSIX)) {
6fd45cb8 8239 for (i = 1; i < tp->irq_max; i++) {
f77a6a8e
MC
8240 tp->napi[i].tx_prod = 0;
8241 tp->napi[i].tx_cons = 0;
63c3a66f 8242 if (tg3_flag(tp, ENABLE_TSS))
c2353a32 8243 tw32_mailbox(tp->napi[i].prodmbox, 0);
f77a6a8e
MC
8244 tw32_rx_mbox(tp->napi[i].consmbox, 0);
8245 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7f230735 8246 tp->napi[i].chk_msi_cnt = 0;
0e6cf6a9
MC
8247 tp->napi[i].last_rx_cons = 0;
8248 tp->napi[i].last_tx_cons = 0;
f77a6a8e 8249 }
63c3a66f 8250 if (!tg3_flag(tp, ENABLE_TSS))
c2353a32 8251 tw32_mailbox(tp->napi[0].prodmbox, 0);
f77a6a8e
MC
8252 } else {
8253 tp->napi[0].tx_prod = 0;
8254 tp->napi[0].tx_cons = 0;
8255 tw32_mailbox(tp->napi[0].prodmbox, 0);
8256 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8257 }
2d31ecaf
MC
8258
8259 /* Make sure the NIC-based send BD rings are disabled. */
63c3a66f 8260 if (!tg3_flag(tp, 5705_PLUS)) {
2d31ecaf
MC
8261 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8262 for (i = 0; i < 16; i++)
8263 tw32_tx_mbox(mbox + i * 8, 0);
8264 }
8265
8266 txrcb = NIC_SRAM_SEND_RCB;
8267 rxrcb = NIC_SRAM_RCV_RET_RCB;
8268
8269 /* Clear status block in ram. */
8270 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8271
8272 /* Set status block DMA address */
8273 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8274 ((u64) tnapi->status_mapping >> 32));
8275 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8276 ((u64) tnapi->status_mapping & 0xffffffff));
8277
f77a6a8e
MC
8278 if (tnapi->tx_ring) {
8279 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8280 (TG3_TX_RING_SIZE <<
8281 BDINFO_FLAGS_MAXLEN_SHIFT),
8282 NIC_SRAM_TX_BUFFER_DESC);
8283 txrcb += TG3_BDINFO_SIZE;
8284 }
8285
8286 if (tnapi->rx_rcb) {
8287 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7cb32cf2
MC
8288 (tp->rx_ret_ring_mask + 1) <<
8289 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
f77a6a8e
MC
8290 rxrcb += TG3_BDINFO_SIZE;
8291 }
8292
8293 stblk = HOSTCC_STATBLCK_RING1;
2d31ecaf 8294
f77a6a8e
MC
8295 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8296 u64 mapping = (u64)tnapi->status_mapping;
8297 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8298 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8299
8300 /* Clear status block in ram. */
8301 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8302
19cfaecc
MC
8303 if (tnapi->tx_ring) {
8304 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8305 (TG3_TX_RING_SIZE <<
8306 BDINFO_FLAGS_MAXLEN_SHIFT),
8307 NIC_SRAM_TX_BUFFER_DESC);
8308 txrcb += TG3_BDINFO_SIZE;
8309 }
f77a6a8e
MC
8310
8311 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7cb32cf2 8312 ((tp->rx_ret_ring_mask + 1) <<
f77a6a8e
MC
8313 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8314
8315 stblk += 8;
f77a6a8e
MC
8316 rxrcb += TG3_BDINFO_SIZE;
8317 }
2d31ecaf
MC
8318}
8319
eb07a940
MC
8320static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8321{
8322 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8323
63c3a66f
JP
8324 if (!tg3_flag(tp, 5750_PLUS) ||
8325 tg3_flag(tp, 5780_CLASS) ||
eb07a940 8326 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
513aa6ea
MC
8327 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8328 tg3_flag(tp, 57765_PLUS))
eb07a940
MC
8329 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8330 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8331 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8332 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8333 else
8334 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8335
8336 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8337 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8338
8339 val = min(nic_rep_thresh, host_rep_thresh);
8340 tw32(RCVBDI_STD_THRESH, val);
8341
63c3a66f 8342 if (tg3_flag(tp, 57765_PLUS))
eb07a940
MC
8343 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8344
63c3a66f 8345 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
eb07a940
MC
8346 return;
8347
513aa6ea 8348 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
eb07a940
MC
8349
8350 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8351
8352 val = min(bdcache_maxcnt / 2, host_rep_thresh);
8353 tw32(RCVBDI_JUMBO_THRESH, val);
8354
63c3a66f 8355 if (tg3_flag(tp, 57765_PLUS))
eb07a940
MC
8356 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8357}
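/* Worked example (hypothetical numbers): with tp->rx_pending = 200,
 * bdcache_maxcnt = 64 and tp->rx_std_max_post = 32, the code above
 * yields nic_rep_thresh = min(64 / 2, 32) = 32 and
 * host_rep_thresh = max(200 / 8, 1) = 25, so RCVBDI_STD_THRESH gets
 * min(32, 25) = 25.
 */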
8358
ccd5ba9d
MC
8359static inline u32 calc_crc(unsigned char *buf, int len)
8360{
8361 u32 reg;
8362 u32 tmp;
8363 int j, k;
8364
8365 reg = 0xffffffff;
8366
8367 for (j = 0; j < len; j++) {
8368 reg ^= buf[j];
8369
8370 for (k = 0; k < 8; k++) {
8371 tmp = reg & 0x01;
8372
8373 reg >>= 1;
8374
8375 if (tmp)
8376 reg ^= 0xedb88320;
8377 }
8378 }
8379
8380 return ~reg;
8381}
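/* Commentary: calc_crc() above is a bit-at-a-time CRC-32 using the
 * reflected IEEE 802.3 polynomial 0xedb88320, seeded with 0xffffffff
 * and inverted on return -- the same CRC ethernet hardware computes
 * over frames, reused here to hash multicast addresses.
 */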
8382
8383static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8384{
8385 /* accept or reject all multicast frames */
8386 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8387 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8388 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8389 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8390}
8391
8392static void __tg3_set_rx_mode(struct net_device *dev)
8393{
8394 struct tg3 *tp = netdev_priv(dev);
8395 u32 rx_mode;
8396
8397 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8398 RX_MODE_KEEP_VLAN_TAG);
8399
8400#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8401 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8402 * flag clear.
8403 */
8404 if (!tg3_flag(tp, ENABLE_ASF))
8405 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8406#endif
8407
8408 if (dev->flags & IFF_PROMISC) {
8409 /* Promiscuous mode. */
8410 rx_mode |= RX_MODE_PROMISC;
8411 } else if (dev->flags & IFF_ALLMULTI) {
8412 /* Accept all multicast. */
8413 tg3_set_multi(tp, 1);
8414 } else if (netdev_mc_empty(dev)) {
8415 /* Reject all multicast. */
8416 tg3_set_multi(tp, 0);
8417 } else {
8418 /* Accept one or more multicast(s). */
8419 struct netdev_hw_addr *ha;
8420 u32 mc_filter[4] = { 0, };
8421 u32 regidx;
8422 u32 bit;
8423 u32 crc;
8424
8425 netdev_for_each_mc_addr(ha, dev) {
8426 crc = calc_crc(ha->addr, ETH_ALEN);
8427 bit = ~crc & 0x7f;
8428 regidx = (bit & 0x60) >> 5;
8429 bit &= 0x1f;
8430 mc_filter[regidx] |= (1 << bit);
8431 }
8432
8433 tw32(MAC_HASH_REG_0, mc_filter[0]);
8434 tw32(MAC_HASH_REG_1, mc_filter[1]);
8435 tw32(MAC_HASH_REG_2, mc_filter[2]);
8436 tw32(MAC_HASH_REG_3, mc_filter[3]);
8437 }
8438
8439 if (rx_mode != tp->rx_mode) {
8440 tp->rx_mode = rx_mode;
8441 tw32_f(MAC_RX_MODE, rx_mode);
8442 udelay(10);
8443 }
8444}
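/* Sketch (unused helper): the multicast hash derivation used in
 * __tg3_set_rx_mode() above, pulled out for illustration. The low 7
 * bits of the inverted CRC select one of 128 filter bits; bits 6:5
 * pick one of the four 32-bit MAC_HASH registers, bits 4:0 the bit
 * within it.
 */
static inline void tg3_mc_hash_example(const u8 *addr,
				       u32 *regidx, u32 *bitpos)
{
	u32 bit = ~calc_crc((unsigned char *)addr, ETH_ALEN) & 0x7f;

	*regidx = (bit & 0x60) >> 5;	/* MAC_HASH_REG_0..3 */
	*bitpos = bit & 0x1f;		/* bit within that register */
}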
8445
90415477
MC
8446static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
8447{
8448 int i;
8449
8450 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8451 tp->rss_ind_tbl[i] =
8452 ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
8453}
8454
8455static void tg3_rss_check_indir_tbl(struct tg3 *tp)
bcebcc46
MC
8456{
8457 int i;
8458
8459 if (!tg3_flag(tp, SUPPORT_MSIX))
8460 return;
8461
90415477 8462 if (tp->irq_cnt <= 2) {
bcebcc46 8463 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
90415477
MC
8464 return;
8465 }
8466
8467 /* Validate table against current IRQ count */
8468 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8469 if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
8470 break;
8471 }
8472
8473 if (i != TG3_RSS_INDIR_TBL_SIZE)
8474 tg3_rss_init_dflt_indir_tbl(tp);
bcebcc46
MC
8475}
8476
90415477 8477static void tg3_rss_write_indir_tbl(struct tg3 *tp)
bcebcc46
MC
8478{
8479 int i = 0;
8480 u32 reg = MAC_RSS_INDIR_TBL_0;
8481
8482 while (i < TG3_RSS_INDIR_TBL_SIZE) {
8483 u32 val = tp->rss_ind_tbl[i];
8484 i++;
8485 for (; i % 8; i++) {
8486 val <<= 4;
8487 val |= tp->rss_ind_tbl[i];
8488 }
8489 tw32(reg, val);
8490 reg += 4;
8491 }
8492}
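/* Worked example (hypothetical entries): tg3_rss_write_indir_tbl()
 * packs eight 4-bit indirection entries per 32-bit register, first
 * entry in the most significant nibble. For entries {1,0,2,1,3,0,1,2}
 * the first register, at MAC_RSS_INDIR_TBL_0, would be written with
 * 0x10213012.
 */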
8493
1da177e4 8494/* tp->lock is held. */
8e7a22e3 8495static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
1da177e4
LT
8496{
8497 u32 val, rdmac_mode;
8498 int i, err, limit;
8fea32b9 8499 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
1da177e4
LT
8500
8501 tg3_disable_ints(tp);
8502
8503 tg3_stop_fw(tp);
8504
8505 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8506
63c3a66f 8507 if (tg3_flag(tp, INIT_COMPLETE))
e6de8ad1 8508 tg3_abort_hw(tp, 1);
1da177e4 8509
699c0193
MC
8510 /* Enable MAC control of LPI */
8511 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8512 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8513 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8514 TG3_CPMU_EEE_LNKIDL_UART_IDL);
8515
8516 tw32_f(TG3_CPMU_EEE_CTRL,
8517 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8518
a386b901
MC
8519 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8520 TG3_CPMU_EEEMD_LPI_IN_TX |
8521 TG3_CPMU_EEEMD_LPI_IN_RX |
8522 TG3_CPMU_EEEMD_EEE_ENABLE;
8523
8524 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8525 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8526
63c3a66f 8527 if (tg3_flag(tp, ENABLE_APE))
a386b901
MC
8528 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8529
8530 tw32_f(TG3_CPMU_EEE_MODE, val);
8531
8532 tw32_f(TG3_CPMU_EEE_DBTMR1,
8533 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8534 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8535
8536 tw32_f(TG3_CPMU_EEE_DBTMR2,
d7f2ab20 8537 TG3_CPMU_DBTMR2_APE_TX_2047US |
a386b901 8538 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
699c0193
MC
8539 }
8540
603f1173 8541 if (reset_phy)
d4d2c558
MC
8542 tg3_phy_reset(tp);
8543
1da177e4
LT
8544 err = tg3_chip_reset(tp);
8545 if (err)
8546 return err;
8547
8548 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8549
bcb37f6c 8550 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
d30cdd28
MC
8551 val = tr32(TG3_CPMU_CTRL);
8552 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8553 tw32(TG3_CPMU_CTRL, val);
9acb961e
MC
8554
8555 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8556 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8557 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8558 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8559
8560 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8561 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8562 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8563 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8564
8565 val = tr32(TG3_CPMU_HST_ACC);
8566 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8567 val |= CPMU_HST_ACC_MACCLK_6_25;
8568 tw32(TG3_CPMU_HST_ACC, val);
d30cdd28
MC
8569 }
8570
33466d93
MC
8571 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8572 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8573 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8574 PCIE_PWR_MGMT_L1_THRESH_4MS;
8575 tw32(PCIE_PWR_MGMT_THRESH, val);
521e6b90
MC
8576
8577 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8578 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8579
8580 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
33466d93 8581
f40386c8
MC
8582 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8583 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
255ca311
MC
8584 }
8585
63c3a66f 8586 if (tg3_flag(tp, L1PLLPD_EN)) {
614b0590
MC
8587 u32 grc_mode = tr32(GRC_MODE);
8588
8589 /* Access the lower 1K of PL PCIE block registers. */
8590 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8591 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8592
8593 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8594 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8595 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8596
8597 tw32(GRC_MODE, grc_mode);
8598 }
8599
55086ad9 8600 if (tg3_flag(tp, 57765_CLASS)) {
5093eedc
MC
8601 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8602 u32 grc_mode = tr32(GRC_MODE);
cea46462 8603
5093eedc
MC
8604 /* Access the lower 1K of PL PCIE block registers. */
8605 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8606 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
cea46462 8607
5093eedc
MC
8608 val = tr32(TG3_PCIE_TLDLPL_PORT +
8609 TG3_PCIE_PL_LO_PHYCTL5);
8610 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8611 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
cea46462 8612
5093eedc
MC
8613 tw32(GRC_MODE, grc_mode);
8614 }
a977dbe8 8615
1ff30a59
MC
8616 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8617 u32 grc_mode = tr32(GRC_MODE);
8618
8619 /* Access the lower 1K of DL PCIE block registers. */
8620 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8621 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8622
8623 val = tr32(TG3_PCIE_TLDLPL_PORT +
8624 TG3_PCIE_DL_LO_FTSMAX);
8625 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8626 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8627 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8628
8629 tw32(GRC_MODE, grc_mode);
8630 }
8631
a977dbe8
MC
8632 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8633 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8634 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8635 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
cea46462
MC
8636 }
8637
1da177e4
LT
8638 /* This works around an issue with Athlon chipsets on
8639 * B3 tigon3 silicon. This bit has no effect on any
8640 * other revision. But do not set this on PCI Express
795d01c5 8641 * chips and don't even touch the clocks if the CPMU is present.
1da177e4 8642 */
63c3a66f
JP
8643 if (!tg3_flag(tp, CPMU_PRESENT)) {
8644 if (!tg3_flag(tp, PCI_EXPRESS))
795d01c5
MC
8645 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8646 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8647 }
1da177e4
LT
8648
8649 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
63c3a66f 8650 tg3_flag(tp, PCIX_MODE)) {
1da177e4
LT
8651 val = tr32(TG3PCI_PCISTATE);
8652 val |= PCISTATE_RETRY_SAME_DMA;
8653 tw32(TG3PCI_PCISTATE, val);
8654 }
8655
63c3a66f 8656 if (tg3_flag(tp, ENABLE_APE)) {
0d3031d9
MC
8657 /* Allow reads and writes to the
8658 * APE register and memory space.
8659 */
8660 val = tr32(TG3PCI_PCISTATE);
8661 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
f92d9dc1
MC
8662 PCISTATE_ALLOW_APE_SHMEM_WR |
8663 PCISTATE_ALLOW_APE_PSPACE_WR;
0d3031d9
MC
8664 tw32(TG3PCI_PCISTATE, val);
8665 }
8666
1da177e4
LT
8667 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8668 /* Enable some hw fixes. */
8669 val = tr32(TG3PCI_MSI_DATA);
8670 val |= (1 << 26) | (1 << 28) | (1 << 29);
8671 tw32(TG3PCI_MSI_DATA, val);
8672 }
8673
8674 /* Descriptor ring init may make accesses to the
8675 * NIC SRAM area to setup the TX descriptors, so we
8676 * can only do this after the hardware has been
8677 * successfully reset.
8678 */
32d8c572
MC
8679 err = tg3_init_rings(tp);
8680 if (err)
8681 return err;
1da177e4 8682
63c3a66f 8683 if (tg3_flag(tp, 57765_PLUS)) {
cbf9ca6c
MC
8684 val = tr32(TG3PCI_DMA_RW_CTRL) &
8685 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
1a319025
MC
8686 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8687 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
55086ad9 8688 if (!tg3_flag(tp, 57765_CLASS) &&
0aebff48
MC
8689 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8690 val |= DMA_RWCTRL_TAGGED_STAT_WA;
cbf9ca6c
MC
8691 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8692 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8693 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
d30cdd28
MC
8694 /* This value is determined during the probe time DMA
8695 * engine test, tg3_test_dma.
8696 */
8697 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8698 }
1da177e4
LT
8699
8700 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8701 GRC_MODE_4X_NIC_SEND_RINGS |
8702 GRC_MODE_NO_TX_PHDR_CSUM |
8703 GRC_MODE_NO_RX_PHDR_CSUM);
8704 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
d2d746f8
MC
8705
8706 /* Pseudo-header checksum is done by hardware logic and not
8707 * the offload processors, so make the chip do the pseudo-
8708 * header checksums on receive. For transmit it is more
8709 * convenient to do the pseudo-header checksum in software
8710 * as Linux does that on transmit for us in all cases.
8711 */
8712 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
1da177e4
LT
8713
8714 tw32(GRC_MODE,
8715 tp->grc_mode |
8716 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8717
8718 /* Set up the timer prescaler register. Clock is always 66 MHz. */
8719 val = tr32(GRC_MISC_CFG);
8720 val &= ~0xff;
8721 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8722 tw32(GRC_MISC_CFG, val);
8723
8724 /* Initialize MBUF/DESC pool. */
63c3a66f 8725 if (tg3_flag(tp, 5750_PLUS)) {
1da177e4
LT
8726 /* Do nothing. */
8727 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8728 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8729 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8730 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8731 else
8732 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8733 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8734 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
63c3a66f 8735 } else if (tg3_flag(tp, TSO_CAPABLE)) {
1da177e4
LT
8736 int fw_len;
8737
077f849d 8738 fw_len = tp->fw_len;
1da177e4
LT
8739 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8740 tw32(BUFMGR_MB_POOL_ADDR,
8741 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8742 tw32(BUFMGR_MB_POOL_SIZE,
8743 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8744 }
1da177e4 8745
0f893dc6 8746 if (tp->dev->mtu <= ETH_DATA_LEN) {
1da177e4
LT
8747 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8748 tp->bufmgr_config.mbuf_read_dma_low_water);
8749 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8750 tp->bufmgr_config.mbuf_mac_rx_low_water);
8751 tw32(BUFMGR_MB_HIGH_WATER,
8752 tp->bufmgr_config.mbuf_high_water);
8753 } else {
8754 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8755 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8756 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8757 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8758 tw32(BUFMGR_MB_HIGH_WATER,
8759 tp->bufmgr_config.mbuf_high_water_jumbo);
8760 }
8761 tw32(BUFMGR_DMA_LOW_WATER,
8762 tp->bufmgr_config.dma_low_water);
8763 tw32(BUFMGR_DMA_HIGH_WATER,
8764 tp->bufmgr_config.dma_high_water);
8765
d309a46e
MC
8766 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8767 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8768 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
4d958473
MC
8769 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8770 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8771 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8772 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
d309a46e 8773 tw32(BUFMGR_MODE, val);
1da177e4
LT
8774 for (i = 0; i < 2000; i++) {
8775 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8776 break;
8777 udelay(10);
8778 }
8779 if (i >= 2000) {
05dbe005 8780 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
1da177e4
LT
8781 return -ENODEV;
8782 }
8783
eb07a940
MC
8784 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8785 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
b5d3772c 8786
eb07a940 8787 tg3_setup_rxbd_thresholds(tp);
1da177e4
LT
8788
8789 /* Initialize TG3_BDINFO's at:
8790 * RCVDBDI_STD_BD: standard eth size rx ring
8791 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8792 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8793 *
8794 * like so:
8795 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8796 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8797 * ring attribute flags
8798 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8799 *
8800 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8801 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8802 *
8803 * The size of each ring is fixed in the firmware, but the location is
8804 * configurable.
8805 */
8806 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
21f581a5 8807 ((u64) tpr->rx_std_mapping >> 32));
1da177e4 8808 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
21f581a5 8809 ((u64) tpr->rx_std_mapping & 0xffffffff));
63c3a66f 8810 if (!tg3_flag(tp, 5717_PLUS))
87668d35
MC
8811 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8812 NIC_SRAM_RX_BUFFER_DESC);
1da177e4 8813
fdb72b38 8814 /* Disable the mini ring */
63c3a66f 8815 if (!tg3_flag(tp, 5705_PLUS))
1da177e4
LT
8816 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8817 BDINFO_FLAGS_DISABLED);
8818
fdb72b38
MC
8819 /* Program the jumbo buffer descriptor ring control
8820 * blocks on those devices that have them.
8821 */
a0512944 8822 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
63c3a66f 8823 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
1da177e4 8824
63c3a66f 8825 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
1da177e4 8826 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
21f581a5 8827 ((u64) tpr->rx_jmb_mapping >> 32));
1da177e4 8828 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
21f581a5 8829 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
de9f5230
MC
8830 val = TG3_RX_JMB_RING_SIZE(tp) <<
8831 BDINFO_FLAGS_MAXLEN_SHIFT;
1da177e4 8832 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
de9f5230 8833 val | BDINFO_FLAGS_USE_EXT_RECV);
63c3a66f 8834 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
55086ad9 8835 tg3_flag(tp, 57765_CLASS))
87668d35
MC
8836 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8837 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
1da177e4
LT
8838 } else {
8839 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8840 BDINFO_FLAGS_DISABLED);
8841 }
8842
63c3a66f 8843 if (tg3_flag(tp, 57765_PLUS)) {
fa6b2aae 8844 val = TG3_RX_STD_RING_SIZE(tp);
7cb32cf2
MC
8845 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8846 val |= (TG3_RX_STD_DMA_SZ << 2);
8847 } else
04380d40 8848 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
fdb72b38 8849 } else
de9f5230 8850 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
fdb72b38
MC
8851
8852 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
1da177e4 8853
411da640 8854 tpr->rx_std_prod_idx = tp->rx_pending;
66711e66 8855 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
1da177e4 8856
63c3a66f
JP
8857 tpr->rx_jmb_prod_idx =
8858 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
66711e66 8859 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
1da177e4 8860
2d31ecaf
MC
8861 tg3_rings_reset(tp);
8862
1da177e4 8863 /* Initialize MAC address and backoff seed. */
986e0aeb 8864 __tg3_set_mac_addr(tp, 0);
1da177e4
LT
8865
8866 /* MTU + ethernet header + FCS + optional VLAN tag */
f7b493e0
MC
8867 tw32(MAC_RX_MTU_SIZE,
8868 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
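/*
 * Editorial note: for the default 1500-byte MTU this programs
 * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522 bytes.
 */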
1da177e4
LT
8869
8870 /* The slot time is changed by tg3_setup_phy if we
8871 * run at gigabit with half duplex.
8872 */
f2096f94
MC
8873 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8874 (6 << TX_LENGTHS_IPG_SHIFT) |
8875 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8876
8877 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8878 val |= tr32(MAC_TX_LENGTHS) &
8879 (TX_LENGTHS_JMB_FRM_LEN_MSK |
8880 TX_LENGTHS_CNT_DWN_VAL_MSK);
8881
8882 tw32(MAC_TX_LENGTHS, val);
1da177e4
LT
8883
8884 /* Receive rules. */
8885 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8886 tw32(RCVLPC_CONFIG, 0x0181);
8887
8888 /* Calculate RDMAC_MODE setting early, we need it to determine
8889 * the RCVLPC_STATE_ENABLE mask.
8890 */
8891 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8892 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8893 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8894 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8895 RDMAC_MODE_LNGREAD_ENAB);
85e94ced 8896
deabaac8 8897 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
0339e4e3
MC
8898 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8899
57e6983c 8900 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
321d32a0
MC
8901 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8902 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
d30cdd28
MC
8903 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8904 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8905 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8906
c5908939
MC
8907 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8908 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
63c3a66f 8909 if (tg3_flag(tp, TSO_CAPABLE) &&
c13e3713 8910 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1da177e4
LT
8911 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8912 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
63c3a66f 8913 !tg3_flag(tp, IS_5788)) {
1da177e4
LT
8914 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8915 }
8916 }
8917
63c3a66f 8918 if (tg3_flag(tp, PCI_EXPRESS))
85e94ced
MC
8919 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8920
63c3a66f
JP
8921 if (tg3_flag(tp, HW_TSO_1) ||
8922 tg3_flag(tp, HW_TSO_2) ||
8923 tg3_flag(tp, HW_TSO_3))
027455ad
MC
8924 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8925
108a6c16 8926 if (tg3_flag(tp, 57765_PLUS) ||
e849cdc3 8927 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
027455ad
MC
8928 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8929 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
1da177e4 8930
f2096f94
MC
8931 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8932 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8933
41a8a7ee
MC
8934 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8935 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8936 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8937 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
63c3a66f 8938 tg3_flag(tp, 57765_PLUS)) {
41a8a7ee 8939 val = tr32(TG3_RDMA_RSRVCTRL_REG);
d78b59f5
MC
8940 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8941 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
b4495ed8
MC
8942 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8943 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8944 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8945 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8946 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8947 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
b75cc0e4 8948 }
41a8a7ee
MC
8949 tw32(TG3_RDMA_RSRVCTRL_REG,
8950 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8951 }
8952
d78b59f5
MC
8953 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8954 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
d309a46e
MC
8955 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8956 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8957 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8958 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8959 }
8960
1da177e4 8961 /* Receive/send statistics. */
63c3a66f 8962 if (tg3_flag(tp, 5750_PLUS)) {
1661394e
MC
8963 val = tr32(RCVLPC_STATS_ENABLE);
8964 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8965 tw32(RCVLPC_STATS_ENABLE, val);
8966 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
63c3a66f 8967 tg3_flag(tp, TSO_CAPABLE)) {
1da177e4
LT
8968 val = tr32(RCVLPC_STATS_ENABLE);
8969 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8970 tw32(RCVLPC_STATS_ENABLE, val);
8971 } else {
8972 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8973 }
8974 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8975 tw32(SNDDATAI_STATSENAB, 0xffffff);
8976 tw32(SNDDATAI_STATSCTRL,
8977 (SNDDATAI_SCTRL_ENABLE |
8978 SNDDATAI_SCTRL_FASTUPD));
8979
8980 /* Setup host coalescing engine. */
8981 tw32(HOSTCC_MODE, 0);
8982 for (i = 0; i < 2000; i++) {
8983 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8984 break;
8985 udelay(10);
8986 }
8987
d244c892 8988 __tg3_set_coalesce(tp, &tp->coal);
1da177e4 8989
63c3a66f 8990 if (!tg3_flag(tp, 5705_PLUS)) {
1da177e4
LT
8991 /* Status/statistics block address. See tg3_timer,
8992 * the tg3_periodic_fetch_stats call there, and
8993 * tg3_get_stats to see how this works for 5705/5750 chips.
8994 */
1da177e4
LT
8995 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8996 ((u64) tp->stats_mapping >> 32));
8997 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8998 ((u64) tp->stats_mapping & 0xffffffff));
8999 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
2d31ecaf 9000
1da177e4 9001 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
2d31ecaf
MC
9002
9003 /* Clear statistics and status block memory areas */
9004 for (i = NIC_SRAM_STATS_BLK;
9005 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9006 i += sizeof(u32)) {
9007 tg3_write_mem(tp, i, 0);
9008 udelay(40);
9009 }
1da177e4
LT
9010 }
9011
9012 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9013
9014 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9015 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
63c3a66f 9016 if (!tg3_flag(tp, 5705_PLUS))
1da177e4
LT
9017 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9018
f07e9af3
MC
9019 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9020 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
c94e3941
MC
9021 /* reset to prevent losing 1st rx packet intermittently */
9022 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9023 udelay(10);
9024 }
9025
3bda1258 9026 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9e975cc2
MC
9027 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9028 MAC_MODE_FHDE_ENABLE;
9029 if (tg3_flag(tp, ENABLE_APE))
9030 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
63c3a66f 9031 if (!tg3_flag(tp, 5705_PLUS) &&
f07e9af3 9032 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
e8f3f6ca
MC
9033 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9034 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1da177e4
LT
9035 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9036 udelay(40);
9037
314fba34 9038 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
63c3a66f 9039 * If TG3_FLAG_IS_NIC is zero, we should read the
314fba34
MC
9040 * register to preserve the GPIO settings for LOMs. The GPIOs,
9041 * whether used as inputs or outputs, are set by boot code after
9042 * reset.
9043 */
63c3a66f 9044 if (!tg3_flag(tp, IS_NIC)) {
314fba34
MC
9045 u32 gpio_mask;
9046
9d26e213
MC
9047 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9048 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9049 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
3e7d83bc
MC
9050
9051 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9052 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9053 GRC_LCLCTRL_GPIO_OUTPUT3;
9054
af36e6b6
MC
9055 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9056 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9057
aaf84465 9058 tp->grc_local_ctrl &= ~gpio_mask;
314fba34
MC
9059 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9060
9061 /* GPIO1 must be driven high for eeprom write protect */
63c3a66f 9062 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9d26e213
MC
9063 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9064 GRC_LCLCTRL_GPIO_OUTPUT1);
314fba34 9065 }
1da177e4
LT
9066 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9067 udelay(100);
9068
c3b5003b 9069 if (tg3_flag(tp, USING_MSIX)) {
baf8a94a 9070 val = tr32(MSGINT_MODE);
c3b5003b
MC
9071 val |= MSGINT_MODE_ENABLE;
9072 if (tp->irq_cnt > 1)
9073 val |= MSGINT_MODE_MULTIVEC_EN;
5b39de91
MC
9074 if (!tg3_flag(tp, 1SHOT_MSI))
9075 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
baf8a94a
MC
9076 tw32(MSGINT_MODE, val);
9077 }
9078
63c3a66f 9079 if (!tg3_flag(tp, 5705_PLUS)) {
1da177e4
LT
9080 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9081 udelay(40);
9082 }
9083
9084 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9085 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9086 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9087 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9088 WDMAC_MODE_LNGREAD_ENAB);
9089
c5908939
MC
9090 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9091 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
63c3a66f 9092 if (tg3_flag(tp, TSO_CAPABLE) &&
1da177e4
LT
9093 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9094 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9095 /* nothing */
9096 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
63c3a66f 9097 !tg3_flag(tp, IS_5788)) {
1da177e4
LT
9098 val |= WDMAC_MODE_RX_ACCEL;
9099 }
9100 }
9101
d9ab5ad1 9102 /* Enable host coalescing bug fix */
63c3a66f 9103 if (tg3_flag(tp, 5755_PLUS))
f51f3562 9104 val |= WDMAC_MODE_STATUS_TAG_FIX;
d9ab5ad1 9105
788a035e
MC
9106 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9107 val |= WDMAC_MODE_BURST_ALL_DATA;
9108
1da177e4
LT
9109 tw32_f(WDMAC_MODE, val);
9110 udelay(40);
9111
63c3a66f 9112 if (tg3_flag(tp, PCIX_MODE)) {
9974a356
MC
9113 u16 pcix_cmd;
9114
9115 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9116 &pcix_cmd);
1da177e4 9117 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9974a356
MC
9118 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9119 pcix_cmd |= PCI_X_CMD_READ_2K;
1da177e4 9120 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9974a356
MC
9121 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9122 pcix_cmd |= PCI_X_CMD_READ_2K;
1da177e4 9123 }
9974a356
MC
9124 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9125 pcix_cmd);
1da177e4
LT
9126 }
9127
9128 tw32_f(RDMAC_MODE, rdmac_mode);
9129 udelay(40);
9130
9131 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
63c3a66f 9132 if (!tg3_flag(tp, 5705_PLUS))
1da177e4 9133 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9936bcf6
MC
9134
9135 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9136 tw32(SNDDATAC_MODE,
9137 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9138 else
9139 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9140
1da177e4
LT
9141 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9142 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7cb32cf2 9143 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
63c3a66f 9144 if (tg3_flag(tp, LRG_PROD_RING_CAP))
7cb32cf2
MC
9145 val |= RCVDBDI_MODE_LRG_RING_SZ;
9146 tw32(RCVDBDI_MODE, val);
1da177e4 9147 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
63c3a66f
JP
9148 if (tg3_flag(tp, HW_TSO_1) ||
9149 tg3_flag(tp, HW_TSO_2) ||
9150 tg3_flag(tp, HW_TSO_3))
1da177e4 9151 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
baf8a94a 9152 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
63c3a66f 9153 if (tg3_flag(tp, ENABLE_TSS))
baf8a94a
MC
9154 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9155 tw32(SNDBDI_MODE, val);
1da177e4
LT
9156 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9157
9158 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9159 err = tg3_load_5701_a0_firmware_fix(tp);
9160 if (err)
9161 return err;
9162 }
9163
63c3a66f 9164 if (tg3_flag(tp, TSO_CAPABLE)) {
1da177e4
LT
9165 err = tg3_load_tso_firmware(tp);
9166 if (err)
9167 return err;
9168 }
1da177e4
LT
9169
9170 tp->tx_mode = TX_MODE_ENABLE;
f2096f94 9171
63c3a66f 9172 if (tg3_flag(tp, 5755_PLUS) ||
b1d05210
MC
9173 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9174 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
f2096f94
MC
9175
9176 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9177 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9178 tp->tx_mode &= ~val;
9179 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9180 }
9181
1da177e4
LT
9182 tw32_f(MAC_TX_MODE, tp->tx_mode);
9183 udelay(100);
9184
63c3a66f 9185 if (tg3_flag(tp, ENABLE_RSS)) {
bcebcc46 9186 tg3_rss_write_indir_tbl(tp);
baf8a94a
MC
9187
9188 /* Setup the "secret" hash key. */
9189 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9190 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9191 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9192 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9193 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9194 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9195 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9196 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9197 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9198 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9199 }
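/*
 * Editorial note: the ten 32-bit writes above load the 40-byte RSS
 * (Toeplitz) hash key.  The key is a fixed constant here rather than
 * a per-boot random value.
 */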
9200
1da177e4 9201 tp->rx_mode = RX_MODE_ENABLE;
63c3a66f 9202 if (tg3_flag(tp, 5755_PLUS))
af36e6b6
MC
9203 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9204
63c3a66f 9205 if (tg3_flag(tp, ENABLE_RSS))
baf8a94a
MC
9206 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9207 RX_MODE_RSS_ITBL_HASH_BITS_7 |
9208 RX_MODE_RSS_IPV6_HASH_EN |
9209 RX_MODE_RSS_TCP_IPV6_HASH_EN |
9210 RX_MODE_RSS_IPV4_HASH_EN |
9211 RX_MODE_RSS_TCP_IPV4_HASH_EN;
9212
1da177e4
LT
9213 tw32_f(MAC_RX_MODE, tp->rx_mode);
9214 udelay(10);
9215
1da177e4
LT
9216 tw32(MAC_LED_CTRL, tp->led_ctrl);
9217
9218 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
f07e9af3 9219 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
1da177e4
LT
9220 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9221 udelay(10);
9222 }
9223 tw32_f(MAC_RX_MODE, tp->rx_mode);
9224 udelay(10);
9225
f07e9af3 9226 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
1da177e4 9227 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
f07e9af3 9228 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
1da177e4
LT
9229 /* Set drive transmission level to 1.2V */
9230 /* only if the signal pre-emphasis bit is not set */
9231 val = tr32(MAC_SERDES_CFG);
9232 val &= 0xfffff000;
9233 val |= 0x880;
9234 tw32(MAC_SERDES_CFG, val);
9235 }
9236 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9237 tw32(MAC_SERDES_CFG, 0x616000);
9238 }
9239
9240 /* Prevent chip from dropping frames when flow control
9241 * is enabled.
9242 */
55086ad9 9243 if (tg3_flag(tp, 57765_CLASS))
666bc831
MC
9244 val = 1;
9245 else
9246 val = 2;
9247 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
1da177e4
LT
9248
9249 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
f07e9af3 9250 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
1da177e4 9251 /* Use hardware link auto-negotiation */
63c3a66f 9252 tg3_flag_set(tp, HW_AUTONEG);
1da177e4
LT
9253 }
9254
f07e9af3 9255 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
6ff6f81d 9256 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
d4d2c558
MC
9257 u32 tmp;
9258
9259 tmp = tr32(SERDES_RX_CTRL);
9260 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9261 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9262 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9263 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9264 }
9265
63c3a66f 9266 if (!tg3_flag(tp, USE_PHYLIB)) {
80096068
MC
9267 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
9268 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
dd477003
MC
9269 tp->link_config.speed = tp->link_config.orig_speed;
9270 tp->link_config.duplex = tp->link_config.orig_duplex;
9271 tp->link_config.autoneg = tp->link_config.orig_autoneg;
9272 }
1da177e4 9273
dd477003
MC
9274 err = tg3_setup_phy(tp, 0);
9275 if (err)
9276 return err;
1da177e4 9277
f07e9af3
MC
9278 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9279 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
dd477003
MC
9280 u32 tmp;
9281
9282 /* Clear CRC stats. */
9283 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9284 tg3_writephy(tp, MII_TG3_TEST1,
9285 tmp | MII_TG3_TEST1_CRC_EN);
f08aa1a8 9286 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
dd477003 9287 }
1da177e4
LT
9288 }
9289 }
9290
9291 __tg3_set_rx_mode(tp->dev);
9292
9293 /* Initialize receive rules. */
9294 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
9295 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9296 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
9297 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9298
63c3a66f 9299 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
1da177e4
LT
9300 limit = 8;
9301 else
9302 limit = 16;
63c3a66f 9303 if (tg3_flag(tp, ENABLE_ASF))
1da177e4
LT
9304 limit -= 4;
9305 switch (limit) {
9306 case 16:
9307 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
9308 case 15:
9309 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
9310 case 14:
9311 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
9312 case 13:
9313 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
9314 case 12:
9315 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
9316 case 11:
9317 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
9318 case 10:
9319 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
9320 case 9:
9321 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
9322 case 8:
9323 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
9324 case 7:
9325 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
9326 case 6:
9327 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
9328 case 5:
9329 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
9330 case 4:
9331 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
9332 case 3:
9333 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
9334 case 2:
9335 case 1:
9336
9337 default:
9338 break;
855e1111 9339 }
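/*
 * Editorial note: the fall-through switch above clears the unused
 * driver-owned receive rule slots, from slot (limit - 1) down to
 * slot 4; rules 0 and 1 were just programmed and 2/3 are deliberately
 * left alone.  When ASF is enabled the top four slots belong to the
 * firmware, hence "limit -= 4" keeps the driver from touching them.
 */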
1da177e4 9340
63c3a66f 9341 if (tg3_flag(tp, ENABLE_APE))
9ce768ea
MC
9342 /* Write our heartbeat update interval to APE. */
9343 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9344 APE_HOST_HEARTBEAT_INT_DISABLE);
0d3031d9 9345
1da177e4
LT
9346 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9347
1da177e4
LT
9348 return 0;
9349}
9350
9351/* Called at device open time to get the chip ready for
9352 * packet processing. Invoked with tp->lock held.
9353 */
8e7a22e3 9354static int tg3_init_hw(struct tg3 *tp, int reset_phy)
1da177e4 9355{
1da177e4
LT
9356 tg3_switch_clocks(tp);
9357
9358 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9359
2f751b67 9360 return tg3_reset_hw(tp, reset_phy);
1da177e4
LT
9361}
9362
ebf3312e
MC
9363/* Restart hardware after configuration changes, self-test, etc.
9364 * Invoked with tp->lock held.
9365 */
9366static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
9367 __releases(tp->lock)
9368 __acquires(tp->lock)
9369{
9370 int err;
9371
9372 err = tg3_init_hw(tp, reset_phy);
9373 if (err) {
9374 netdev_err(tp->dev,
9375 "Failed to re-initialize device, aborting\n");
9376 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9377 tg3_full_unlock(tp);
9378 del_timer_sync(&tp->timer);
9379 tp->irq_sync = 0;
9380 tg3_napi_enable(tp);
9381 dev_close(tp->dev);
9382 tg3_full_lock(tp, 0);
9383 }
9384 return err;
9385}
9386
9a21fb8f
MC
9387static void tg3_reset_task(struct work_struct *work)
9388{
9389 struct tg3 *tp = container_of(work, struct tg3, reset_task);
9390 int err;
9391
9392 tg3_full_lock(tp, 0);
9393
9394 if (!netif_running(tp->dev)) {
9395 tg3_flag_clear(tp, RESET_TASK_PENDING);
9396 tg3_full_unlock(tp);
9397 return;
9398 }
9399
9400 tg3_full_unlock(tp);
9401
9402 tg3_phy_stop(tp);
9403
9404 tg3_netif_stop(tp);
9405
9406 tg3_full_lock(tp, 1);
9407
9408 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
9409 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9410 tp->write32_rx_mbox = tg3_write_flush_reg32;
9411 tg3_flag_set(tp, MBOX_WRITE_REORDER);
9412 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
9413 }
9414
9415 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
9416 err = tg3_init_hw(tp, 1);
9417 if (err)
9418 goto out;
9419
9420 tg3_netif_start(tp);
9421
9422out:
9423 tg3_full_unlock(tp);
9424
9425 if (!err)
9426 tg3_phy_start(tp);
9427
9428 tg3_flag_clear(tp, RESET_TASK_PENDING);
9429}
9430
1da177e4
LT
9431#define TG3_STAT_ADD32(PSTAT, REG) \
9432do { u32 __val = tr32(REG); \
9433 (PSTAT)->low += __val; \
9434 if ((PSTAT)->low < __val) \
9435 (PSTAT)->high += 1; \
9436} while (0)
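/*
 * Editorial note: TG3_STAT_ADD32 folds a 32-bit hardware counter value
 * into a 64-bit { high, low } accumulator.  Unsigned wraparound of the
 * low word is caught by the "(PSTAT)->low < __val" test, e.g.
 * low = 0xffffff00 plus __val = 0x200 yields 0x100, which is less than
 * __val, so one carry is added to the high word.
 */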
9437
9438static void tg3_periodic_fetch_stats(struct tg3 *tp)
9439{
9440 struct tg3_hw_stats *sp = tp->hw_stats;
9441
9442 if (!netif_carrier_ok(tp->dev))
9443 return;
9444
9445 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9446 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9447 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9448 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9449 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9450 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9451 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9452 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9453 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9454 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9455 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9456 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9457 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9458
9459 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9460 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9461 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9462 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9463 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9464 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9465 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9466 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9467 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9468 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9469 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9470 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9471 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9472 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
463d305b
MC
9473
9474 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
310050fa
MC
9475 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9476 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9477 tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
4d958473
MC
9478 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9479 } else {
9480 u32 val = tr32(HOSTCC_FLOW_ATTN);
9481 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9482 if (val) {
9483 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9484 sp->rx_discards.low += val;
9485 if (sp->rx_discards.low < val)
9486 sp->rx_discards.high += 1;
9487 }
9488 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9489 }
463d305b 9490 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
1da177e4
LT
9491}
9492
0e6cf6a9
MC
9493static void tg3_chk_missed_msi(struct tg3 *tp)
9494{
9495 u32 i;
9496
9497 for (i = 0; i < tp->irq_cnt; i++) {
9498 struct tg3_napi *tnapi = &tp->napi[i];
9499
9500 if (tg3_has_work(tnapi)) {
9501 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9502 tnapi->last_tx_cons == tnapi->tx_cons) {
9503 if (tnapi->chk_msi_cnt < 1) {
9504 tnapi->chk_msi_cnt++;
9505 return;
9506 }
7f230735 9507 tg3_msi(0, tnapi);
0e6cf6a9
MC
9508 }
9509 }
9510 tnapi->chk_msi_cnt = 0;
9511 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9512 tnapi->last_tx_cons = tnapi->tx_cons;
9513 }
9514}
9515
1da177e4
LT
9516static void tg3_timer(unsigned long __opaque)
9517{
9518 struct tg3 *tp = (struct tg3 *) __opaque;
1da177e4 9519
5b190624 9520 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
f475f163
MC
9521 goto restart_timer;
9522
f47c11ee 9523 spin_lock(&tp->lock);
1da177e4 9524
0e6cf6a9 9525 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
55086ad9 9526 tg3_flag(tp, 57765_CLASS))
0e6cf6a9
MC
9527 tg3_chk_missed_msi(tp);
9528
63c3a66f 9529 if (!tg3_flag(tp, TAGGED_STATUS)) {
fac9b83e
DM
9530 /* All of this garbage is because, when using non-tagged
9531 * IRQ status, the mailbox/status_block protocol the chip
9532 * uses with the CPU is race prone.
9533 */
898a56f8 9534 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
fac9b83e
DM
9535 tw32(GRC_LOCAL_CTRL,
9536 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9537 } else {
9538 tw32(HOSTCC_MODE, tp->coalesce_mode |
fd2ce37f 9539 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
fac9b83e 9540 }
1da177e4 9541
fac9b83e 9542 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
f47c11ee 9543 spin_unlock(&tp->lock);
db219973 9544 tg3_reset_task_schedule(tp);
5b190624 9545 goto restart_timer;
fac9b83e 9546 }
1da177e4
LT
9547 }
9548
1da177e4
LT
9549 /* This part only runs once per second. */
9550 if (!--tp->timer_counter) {
63c3a66f 9551 if (tg3_flag(tp, 5705_PLUS))
fac9b83e
DM
9552 tg3_periodic_fetch_stats(tp);
9553
b0c5943f
MC
9554 if (tp->setlpicnt && !--tp->setlpicnt)
9555 tg3_phy_eee_enable(tp);
52b02d04 9556
63c3a66f 9557 if (tg3_flag(tp, USE_LINKCHG_REG)) {
1da177e4
LT
9558 u32 mac_stat;
9559 int phy_event;
9560
9561 mac_stat = tr32(MAC_STATUS);
9562
9563 phy_event = 0;
f07e9af3 9564 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
1da177e4
LT
9565 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9566 phy_event = 1;
9567 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9568 phy_event = 1;
9569
9570 if (phy_event)
9571 tg3_setup_phy(tp, 0);
63c3a66f 9572 } else if (tg3_flag(tp, POLL_SERDES)) {
1da177e4
LT
9573 u32 mac_stat = tr32(MAC_STATUS);
9574 int need_setup = 0;
9575
9576 if (netif_carrier_ok(tp->dev) &&
9577 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9578 need_setup = 1;
9579 }
be98da6a 9580 if (!netif_carrier_ok(tp->dev) &&
1da177e4
LT
9581 (mac_stat & (MAC_STATUS_PCS_SYNCED |
9582 MAC_STATUS_SIGNAL_DET))) {
9583 need_setup = 1;
9584 }
9585 if (need_setup) {
3d3ebe74
MC
9586 if (!tp->serdes_counter) {
9587 tw32_f(MAC_MODE,
9588 (tp->mac_mode &
9589 ~MAC_MODE_PORT_MODE_MASK));
9590 udelay(40);
9591 tw32_f(MAC_MODE, tp->mac_mode);
9592 udelay(40);
9593 }
1da177e4
LT
9594 tg3_setup_phy(tp, 0);
9595 }
f07e9af3 9596 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
63c3a66f 9597 tg3_flag(tp, 5780_CLASS)) {
747e8f8b 9598 tg3_serdes_parallel_detect(tp);
57d8b880 9599 }
1da177e4
LT
9600
9601 tp->timer_counter = tp->timer_multiplier;
9602 }
9603
130b8e4d
MC
9604 /* Heartbeat is only sent once every 2 seconds.
9605 *
9606 * The heartbeat is to tell the ASF firmware that the host
9607 * driver is still alive. In the event that the OS crashes,
9608 * ASF needs to reset the hardware to free up the FIFO space
9609 * that may be filled with rx packets destined for the host.
9610 * If the FIFO is full, ASF will no longer function properly.
9611 *
9612 * Unintended resets have been reported on real time kernels
9613 * where the timer doesn't run on time. Netpoll will also have
9614 * the same problem.
9615 *
9616 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9617 * to check the ring condition when the heartbeat is expiring
9618 * before doing the reset. This will prevent most unintended
9619 * resets.
9620 */
1da177e4 9621 if (!--tp->asf_counter) {
63c3a66f 9622 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
7c5026aa
MC
9623 tg3_wait_for_event_ack(tp);
9624
bbadf503 9625 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
130b8e4d 9626 FWCMD_NICDRV_ALIVE3);
bbadf503 9627 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
c6cdf436
MC
9628 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9629 TG3_FW_UPDATE_TIMEOUT_SEC);
4ba526ce
MC
9630
9631 tg3_generate_fw_event(tp);
1da177e4
LT
9632 }
9633 tp->asf_counter = tp->asf_multiplier;
9634 }
9635
f47c11ee 9636 spin_unlock(&tp->lock);
1da177e4 9637
f475f163 9638restart_timer:
1da177e4
LT
9639 tp->timer.expires = jiffies + tp->timer_offset;
9640 add_timer(&tp->timer);
9641}
9642
4f125f42 9643static int tg3_request_irq(struct tg3 *tp, int irq_num)
fcfa0a32 9644{
7d12e780 9645 irq_handler_t fn;
fcfa0a32 9646 unsigned long flags;
4f125f42
MC
9647 char *name;
9648 struct tg3_napi *tnapi = &tp->napi[irq_num];
9649
9650 if (tp->irq_cnt == 1)
9651 name = tp->dev->name;
9652 else {
9653 name = &tnapi->irq_lbl[0];
9654 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9655 name[IFNAMSIZ-1] = 0;
9656 }
fcfa0a32 9657
63c3a66f 9658 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
fcfa0a32 9659 fn = tg3_msi;
63c3a66f 9660 if (tg3_flag(tp, 1SHOT_MSI))
fcfa0a32 9661 fn = tg3_msi_1shot;
ab392d2d 9662 flags = 0;
fcfa0a32
MC
9663 } else {
9664 fn = tg3_interrupt;
63c3a66f 9665 if (tg3_flag(tp, TAGGED_STATUS))
fcfa0a32 9666 fn = tg3_interrupt_tagged;
ab392d2d 9667 flags = IRQF_SHARED;
fcfa0a32 9668 }
4f125f42
MC
9669
9670 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
fcfa0a32
MC
9671}
9672
7938109f
MC
9673static int tg3_test_interrupt(struct tg3 *tp)
9674{
09943a18 9675 struct tg3_napi *tnapi = &tp->napi[0];
7938109f 9676 struct net_device *dev = tp->dev;
b16250e3 9677 int err, i, intr_ok = 0;
f6eb9b1f 9678 u32 val;
7938109f 9679
d4bc3927
MC
9680 if (!netif_running(dev))
9681 return -ENODEV;
9682
7938109f
MC
9683 tg3_disable_ints(tp);
9684
4f125f42 9685 free_irq(tnapi->irq_vec, tnapi);
7938109f 9686
f6eb9b1f
MC
9687 /*
9688 * Turn off MSI one shot mode. Otherwise this test has no
9689 * observable way to know whether the interrupt was delivered.
9690 */
3aa1cdf8 9691 if (tg3_flag(tp, 57765_PLUS)) {
f6eb9b1f
MC
9692 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9693 tw32(MSGINT_MODE, val);
9694 }
9695
4f125f42 9696 err = request_irq(tnapi->irq_vec, tg3_test_isr,
09943a18 9697 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
7938109f
MC
9698 if (err)
9699 return err;
9700
898a56f8 9701 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
7938109f
MC
9702 tg3_enable_ints(tp);
9703
9704 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
fd2ce37f 9705 tnapi->coal_now);
7938109f
MC
9706
9707 for (i = 0; i < 5; i++) {
b16250e3
MC
9708 u32 int_mbox, misc_host_ctrl;
9709
898a56f8 9710 int_mbox = tr32_mailbox(tnapi->int_mbox);
b16250e3
MC
9711 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9712
9713 if ((int_mbox != 0) ||
9714 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9715 intr_ok = 1;
7938109f 9716 break;
b16250e3
MC
9717 }
9718
3aa1cdf8
MC
9719 if (tg3_flag(tp, 57765_PLUS) &&
9720 tnapi->hw_status->status_tag != tnapi->last_tag)
9721 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9722
7938109f
MC
9723 msleep(10);
9724 }
9725
9726 tg3_disable_ints(tp);
9727
4f125f42 9728 free_irq(tnapi->irq_vec, tnapi);
6aa20a22 9729
4f125f42 9730 err = tg3_request_irq(tp, 0);
7938109f
MC
9731
9732 if (err)
9733 return err;
9734
f6eb9b1f
MC
9735 if (intr_ok) {
9736 /* Reenable MSI one shot mode. */
5b39de91 9737 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
f6eb9b1f
MC
9738 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9739 tw32(MSGINT_MODE, val);
9740 }
7938109f 9741 return 0;
f6eb9b1f 9742 }
7938109f
MC
9743
9744 return -EIO;
9745}
9746
9747/* Returns 0 if the MSI test succeeds, or if it fails but INTx mode
9748 * is successfully restored.
9749 */
9750static int tg3_test_msi(struct tg3 *tp)
9751{
7938109f
MC
9752 int err;
9753 u16 pci_cmd;
9754
63c3a66f 9755 if (!tg3_flag(tp, USING_MSI))
7938109f
MC
9756 return 0;
9757
9758 /* Turn off SERR reporting in case MSI terminates with Master
9759 * Abort.
9760 */
9761 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9762 pci_write_config_word(tp->pdev, PCI_COMMAND,
9763 pci_cmd & ~PCI_COMMAND_SERR);
9764
9765 err = tg3_test_interrupt(tp);
9766
9767 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9768
9769 if (!err)
9770 return 0;
9771
9772 /* other failures */
9773 if (err != -EIO)
9774 return err;
9775
9776 /* MSI test failed, go back to INTx mode */
5129c3a3
MC
9777 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9778 "to INTx mode. Please report this failure to the PCI "
9779 "maintainer and include system chipset information\n");
7938109f 9780
4f125f42 9781 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
09943a18 9782
7938109f
MC
9783 pci_disable_msi(tp->pdev);
9784
63c3a66f 9785 tg3_flag_clear(tp, USING_MSI);
dc8bf1b1 9786 tp->napi[0].irq_vec = tp->pdev->irq;
7938109f 9787
4f125f42 9788 err = tg3_request_irq(tp, 0);
7938109f
MC
9789 if (err)
9790 return err;
9791
9792 /* Need to reset the chip because the MSI cycle may have terminated
9793 * with Master Abort.
9794 */
f47c11ee 9795 tg3_full_lock(tp, 1);
7938109f 9796
944d980e 9797 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8e7a22e3 9798 err = tg3_init_hw(tp, 1);
7938109f 9799
f47c11ee 9800 tg3_full_unlock(tp);
7938109f
MC
9801
9802 if (err)
4f125f42 9803 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
7938109f
MC
9804
9805 return err;
9806}
9807
9e9fd12d
MC
9808static int tg3_request_firmware(struct tg3 *tp)
9809{
9810 const __be32 *fw_data;
9811
9812 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
05dbe005
JP
9813 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9814 tp->fw_needed);
9e9fd12d
MC
9815 return -ENOENT;
9816 }
9817
9818 fw_data = (void *)tp->fw->data;
9819
9820 /* Firmware blob starts with version numbers, followed by
9821 * start address and _full_ length including BSS sections
9822 * (which must be longer than the actual data, of course).
9823 */
9824
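/*
 * Editorial sketch of the header layout described above (the field
 * names are assumptions; only the word offsets come from the code):
 *
 *	fw_data[0]	firmware version
 *	fw_data[1]	load/start address
 *	fw_data[2]	full image length, BSS included
 *
 * Hence the sanity check below: with a 12-byte header, a declared
 * length smaller than (tp->fw->size - 12) cannot cover the payload.
 */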
9825 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
9826 if (tp->fw_len < (tp->fw->size - 12)) {
05dbe005
JP
9827 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9828 tp->fw_len, tp->fw_needed);
9e9fd12d
MC
9829 release_firmware(tp->fw);
9830 tp->fw = NULL;
9831 return -EINVAL;
9832 }
9833
9834 /* We no longer need firmware; we have it. */
9835 tp->fw_needed = NULL;
9836 return 0;
9837}
9838
679563f4
MC
9839static bool tg3_enable_msix(struct tg3 *tp)
9840{
c3b5003b 9841 int i, rc;
679563f4
MC
9842 struct msix_entry msix_ent[tp->irq_max];
9843
c3b5003b
MC
9844 tp->irq_cnt = num_online_cpus();
9845 if (tp->irq_cnt > 1) {
9846 /* We want as many rx rings enabled as there are cpus.
9847 * In multiqueue MSI-X mode, the first MSI-X vector
9848 * only deals with link interrupts, etc., so we add
9849 * one to the number of vectors we are requesting.
9850 */
9851 tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
9852 }
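/*
 * Editorial example (assuming tp->irq_max = 5): on a 4-CPU system
 * this requests min(4 + 1, 5) = 5 vectors -- vector 0 for link and
 * miscellaneous interrupts, vectors 1-4 for the four rx rings.
 */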
679563f4
MC
9853
9854 for (i = 0; i < tp->irq_max; i++) {
9855 msix_ent[i].entry = i;
9856 msix_ent[i].vector = 0;
9857 }
9858
9859 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
2430b031
MC
9860 if (rc < 0) {
9861 return false;
9862 } else if (rc != 0) {
679563f4
MC
9863 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9864 return false;
05dbe005
JP
9865 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9866 tp->irq_cnt, rc);
679563f4
MC
9867 tp->irq_cnt = rc;
9868 }
9869
9870 for (i = 0; i < tp->irq_max; i++)
9871 tp->napi[i].irq_vec = msix_ent[i].vector;
9872
2ddaad39
BH
9873 netif_set_real_num_tx_queues(tp->dev, 1);
9874 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9875 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9876 pci_disable_msix(tp->pdev);
9877 return false;
9878 }
b92b9040
MC
9879
9880 if (tp->irq_cnt > 1) {
63c3a66f 9881 tg3_flag_set(tp, ENABLE_RSS);
d78b59f5
MC
9882
9883 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9884 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
63c3a66f 9885 tg3_flag_set(tp, ENABLE_TSS);
b92b9040
MC
9886 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9887 }
9888 }
2430b031 9889
679563f4
MC
9890 return true;
9891}
9892
07b0173c
MC
9893static void tg3_ints_init(struct tg3 *tp)
9894{
63c3a66f
JP
9895 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9896 !tg3_flag(tp, TAGGED_STATUS)) {
07b0173c
MC
9897 /* All MSI supporting chips should support tagged
9898 * status. Assert that this is the case.
9899 */
5129c3a3
MC
9900 netdev_warn(tp->dev,
9901 "MSI without TAGGED_STATUS? Not using MSI\n");
679563f4 9902 goto defcfg;
07b0173c 9903 }
4f125f42 9904
63c3a66f
JP
9905 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9906 tg3_flag_set(tp, USING_MSIX);
9907 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9908 tg3_flag_set(tp, USING_MSI);
679563f4 9909
63c3a66f 9910 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
679563f4 9911 u32 msi_mode = tr32(MSGINT_MODE);
63c3a66f 9912 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
baf8a94a 9913 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
5b39de91
MC
9914 if (!tg3_flag(tp, 1SHOT_MSI))
9915 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
679563f4
MC
9916 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9917 }
9918defcfg:
63c3a66f 9919 if (!tg3_flag(tp, USING_MSIX)) {
679563f4
MC
9920 tp->irq_cnt = 1;
9921 tp->napi[0].irq_vec = tp->pdev->irq;
2ddaad39 9922 netif_set_real_num_tx_queues(tp->dev, 1);
85407885 9923 netif_set_real_num_rx_queues(tp->dev, 1);
679563f4 9924 }
07b0173c
MC
9925}
9926
9927static void tg3_ints_fini(struct tg3 *tp)
9928{
63c3a66f 9929 if (tg3_flag(tp, USING_MSIX))
679563f4 9930 pci_disable_msix(tp->pdev);
63c3a66f 9931 else if (tg3_flag(tp, USING_MSI))
679563f4 9932 pci_disable_msi(tp->pdev);
63c3a66f
JP
9933 tg3_flag_clear(tp, USING_MSI);
9934 tg3_flag_clear(tp, USING_MSIX);
9935 tg3_flag_clear(tp, ENABLE_RSS);
9936 tg3_flag_clear(tp, ENABLE_TSS);
07b0173c
MC
9937}
9938
1da177e4
LT
9939static int tg3_open(struct net_device *dev)
9940{
9941 struct tg3 *tp = netdev_priv(dev);
4f125f42 9942 int i, err;
1da177e4 9943
9e9fd12d
MC
9944 if (tp->fw_needed) {
9945 err = tg3_request_firmware(tp);
9946 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9947 if (err)
9948 return err;
9949 } else if (err) {
05dbe005 9950 netdev_warn(tp->dev, "TSO capability disabled\n");
63c3a66f
JP
9951 tg3_flag_clear(tp, TSO_CAPABLE);
9952 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
05dbe005 9953 netdev_notice(tp->dev, "TSO capability restored\n");
63c3a66f 9954 tg3_flag_set(tp, TSO_CAPABLE);
9e9fd12d
MC
9955 }
9956 }
9957
c49a1561
MC
9958 netif_carrier_off(tp->dev);
9959
c866b7ea 9960 err = tg3_power_up(tp);
2f751b67 9961 if (err)
bc1c7567 9962 return err;
2f751b67
MC
9963
9964 tg3_full_lock(tp, 0);
bc1c7567 9965
1da177e4 9966 tg3_disable_ints(tp);
63c3a66f 9967 tg3_flag_clear(tp, INIT_COMPLETE);
1da177e4 9968
f47c11ee 9969 tg3_full_unlock(tp);
1da177e4 9970
679563f4
MC
9971 /*
9972 * Setup interrupts first so we know how
9973 * many NAPI resources to allocate
9974 */
9975 tg3_ints_init(tp);
9976
90415477 9977 tg3_rss_check_indir_tbl(tp);
bcebcc46 9978
1da177e4
LT
9979 /* The placement of this call is tied
9980 * to the setup and use of Host TX descriptors.
9981 */
9982 err = tg3_alloc_consistent(tp);
9983 if (err)
679563f4 9984 goto err_out1;
88b06bc2 9985
66cfd1bd
MC
9986 tg3_napi_init(tp);
9987
fed97810 9988 tg3_napi_enable(tp);
1da177e4 9989
4f125f42
MC
9990 for (i = 0; i < tp->irq_cnt; i++) {
9991 struct tg3_napi *tnapi = &tp->napi[i];
9992 err = tg3_request_irq(tp, i);
9993 if (err) {
5bc09186
MC
9994 for (i--; i >= 0; i--) {
9995 tnapi = &tp->napi[i];
4f125f42 9996 free_irq(tnapi->irq_vec, tnapi);
5bc09186
MC
9997 }
9998 goto err_out2;
4f125f42
MC
9999 }
10000 }
1da177e4 10001
f47c11ee 10002 tg3_full_lock(tp, 0);
1da177e4 10003
8e7a22e3 10004 err = tg3_init_hw(tp, 1);
1da177e4 10005 if (err) {
944d980e 10006 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4
LT
10007 tg3_free_rings(tp);
10008 } else {
0e6cf6a9 10009 if (tg3_flag(tp, TAGGED_STATUS) &&
55086ad9
MC
10010 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
10011 !tg3_flag(tp, 57765_CLASS))
fac9b83e
DM
10012 tp->timer_offset = HZ;
10013 else
10014 tp->timer_offset = HZ / 10;
10015
10016 BUG_ON(tp->timer_offset > HZ);
10017 tp->timer_counter = tp->timer_multiplier =
10018 (HZ / tp->timer_offset);
10019 tp->asf_counter = tp->asf_multiplier =
28fbef78 10020 ((HZ / tp->timer_offset) * 2);
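/*
 * Editorial example: with tagged status the timer fires every HZ
 * jiffies (once per second), so timer_multiplier = 1 and
 * asf_multiplier = 2; without it the timer fires every HZ/10 jiffies,
 * giving timer_multiplier = 10 and asf_multiplier = 20.  Either way
 * the once-per-second work in tg3_timer() runs each second and the
 * ASF heartbeat goes out every 2 seconds.
 */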
1da177e4
LT
10021
10022 init_timer(&tp->timer);
10023 tp->timer.expires = jiffies + tp->timer_offset;
10024 tp->timer.data = (unsigned long) tp;
10025 tp->timer.function = tg3_timer;
1da177e4
LT
10026 }
10027
f47c11ee 10028 tg3_full_unlock(tp);
1da177e4 10029
07b0173c 10030 if (err)
679563f4 10031 goto err_out3;
1da177e4 10032
63c3a66f 10033 if (tg3_flag(tp, USING_MSI)) {
7938109f 10034 err = tg3_test_msi(tp);
fac9b83e 10035
7938109f 10036 if (err) {
f47c11ee 10037 tg3_full_lock(tp, 0);
944d980e 10038 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7938109f 10039 tg3_free_rings(tp);
f47c11ee 10040 tg3_full_unlock(tp);
7938109f 10041
679563f4 10042 goto err_out2;
7938109f 10043 }
fcfa0a32 10044
63c3a66f 10045 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
f6eb9b1f 10046 u32 val = tr32(PCIE_TRANSACTION_CFG);
fcfa0a32 10047
f6eb9b1f
MC
10048 tw32(PCIE_TRANSACTION_CFG,
10049 val | PCIE_TRANS_CFG_1SHOT_MSI);
fcfa0a32 10050 }
7938109f
MC
10051 }
10052
b02fd9e3
MC
10053 tg3_phy_start(tp);
10054
f47c11ee 10055 tg3_full_lock(tp, 0);
1da177e4 10056
7938109f 10057 add_timer(&tp->timer);
63c3a66f 10058 tg3_flag_set(tp, INIT_COMPLETE);
1da177e4
LT
10059 tg3_enable_ints(tp);
10060
f47c11ee 10061 tg3_full_unlock(tp);
1da177e4 10062
fe5f5787 10063 netif_tx_start_all_queues(dev);
1da177e4 10064
06c03c02
MB
10065 /*
10066 * Reset loopback feature if it was turned on while the device was down
10067 * make sure that it's installed properly now.
10068 */
10069 if (dev->features & NETIF_F_LOOPBACK)
10070 tg3_set_loopback(dev, dev->features);
10071
1da177e4 10072 return 0;
07b0173c 10073
679563f4 10074err_out3:
4f125f42
MC
10075 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10076 struct tg3_napi *tnapi = &tp->napi[i];
10077 free_irq(tnapi->irq_vec, tnapi);
10078 }
07b0173c 10079
679563f4 10080err_out2:
fed97810 10081 tg3_napi_disable(tp);
66cfd1bd 10082 tg3_napi_fini(tp);
07b0173c 10083 tg3_free_consistent(tp);
679563f4
MC
10084
10085err_out1:
10086 tg3_ints_fini(tp);
cd0d7228
MC
10087 tg3_frob_aux_power(tp, false);
10088 pci_set_power_state(tp->pdev, PCI_D3hot);
07b0173c 10089 return err;
1da177e4
LT
10090}
10091
1da177e4
LT
10092static int tg3_close(struct net_device *dev)
10093{
4f125f42 10094 int i;
1da177e4
LT
10095 struct tg3 *tp = netdev_priv(dev);
10096
fed97810 10097 tg3_napi_disable(tp);
db219973 10098 tg3_reset_task_cancel(tp);
7faa006f 10099
fe5f5787 10100 netif_tx_stop_all_queues(dev);
1da177e4
LT
10101
10102 del_timer_sync(&tp->timer);
10103
24bb4fb6
MC
10104 tg3_phy_stop(tp);
10105
f47c11ee 10106 tg3_full_lock(tp, 1);
1da177e4
LT
10107
10108 tg3_disable_ints(tp);
10109
944d980e 10110 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4 10111 tg3_free_rings(tp);
63c3a66f 10112 tg3_flag_clear(tp, INIT_COMPLETE);
1da177e4 10113
f47c11ee 10114 tg3_full_unlock(tp);
1da177e4 10115
4f125f42
MC
10116 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10117 struct tg3_napi *tnapi = &tp->napi[i];
10118 free_irq(tnapi->irq_vec, tnapi);
10119 }
07b0173c
MC
10120
10121 tg3_ints_fini(tp);
1da177e4 10122
92feeabf
MC
10123 /* Clear stats across close / open calls */
10124 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10125 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
1da177e4 10126
66cfd1bd
MC
10127 tg3_napi_fini(tp);
10128
1da177e4
LT
10129 tg3_free_consistent(tp);
10130
c866b7ea 10131 tg3_power_down(tp);
bc1c7567
MC
10132
10133 netif_carrier_off(tp->dev);
10134
1da177e4
LT
10135 return 0;
10136}
10137
511d2224 10138static inline u64 get_stat64(tg3_stat64_t *val)
816f8b86
SB
10139{
10140 return ((u64)val->high << 32) | ((u64)val->low);
10141}
10142
511d2224 10143static u64 calc_crc_errors(struct tg3 *tp)
1da177e4
LT
10144{
10145 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10146
f07e9af3 10147 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
1da177e4
LT
10148 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10149 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1da177e4
LT
10150 u32 val;
10151
f47c11ee 10152 spin_lock_bh(&tp->lock);
569a5df8
MC
10153 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10154 tg3_writephy(tp, MII_TG3_TEST1,
10155 val | MII_TG3_TEST1_CRC_EN);
f08aa1a8 10156 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
1da177e4
LT
10157 } else
10158 val = 0;
f47c11ee 10159 spin_unlock_bh(&tp->lock);
1da177e4
LT
10160
10161 tp->phy_crc_errors += val;
10162
10163 return tp->phy_crc_errors;
10164 }
10165
10166 return get_stat64(&hw_stats->rx_fcs_errors);
10167}
10168
10169#define ESTAT_ADD(member) \
10170 estats->member = old_estats->member + \
511d2224 10171 get_stat64(&hw_stats->member)
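/*
 * Editorial note: unlike TG3_STAT_ADD32 above, ESTAT_ADD overwrites
 * rather than accumulates -- each ethtool stat is the pre-reset
 * snapshot in estats_prev plus the current 64-bit hardware counter.
 */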
1da177e4 10172
0e6c9da3
MC
10173static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp,
10174 struct tg3_ethtool_stats *estats)
1da177e4 10175{
1da177e4
LT
10176 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10177 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10178
1da177e4
LT
10179 ESTAT_ADD(rx_octets);
10180 ESTAT_ADD(rx_fragments);
10181 ESTAT_ADD(rx_ucast_packets);
10182 ESTAT_ADD(rx_mcast_packets);
10183 ESTAT_ADD(rx_bcast_packets);
10184 ESTAT_ADD(rx_fcs_errors);
10185 ESTAT_ADD(rx_align_errors);
10186 ESTAT_ADD(rx_xon_pause_rcvd);
10187 ESTAT_ADD(rx_xoff_pause_rcvd);
10188 ESTAT_ADD(rx_mac_ctrl_rcvd);
10189 ESTAT_ADD(rx_xoff_entered);
10190 ESTAT_ADD(rx_frame_too_long_errors);
10191 ESTAT_ADD(rx_jabbers);
10192 ESTAT_ADD(rx_undersize_packets);
10193 ESTAT_ADD(rx_in_length_errors);
10194 ESTAT_ADD(rx_out_length_errors);
10195 ESTAT_ADD(rx_64_or_less_octet_packets);
10196 ESTAT_ADD(rx_65_to_127_octet_packets);
10197 ESTAT_ADD(rx_128_to_255_octet_packets);
10198 ESTAT_ADD(rx_256_to_511_octet_packets);
10199 ESTAT_ADD(rx_512_to_1023_octet_packets);
10200 ESTAT_ADD(rx_1024_to_1522_octet_packets);
10201 ESTAT_ADD(rx_1523_to_2047_octet_packets);
10202 ESTAT_ADD(rx_2048_to_4095_octet_packets);
10203 ESTAT_ADD(rx_4096_to_8191_octet_packets);
10204 ESTAT_ADD(rx_8192_to_9022_octet_packets);
10205
10206 ESTAT_ADD(tx_octets);
10207 ESTAT_ADD(tx_collisions);
10208 ESTAT_ADD(tx_xon_sent);
10209 ESTAT_ADD(tx_xoff_sent);
10210 ESTAT_ADD(tx_flow_control);
10211 ESTAT_ADD(tx_mac_errors);
10212 ESTAT_ADD(tx_single_collisions);
10213 ESTAT_ADD(tx_mult_collisions);
10214 ESTAT_ADD(tx_deferred);
10215 ESTAT_ADD(tx_excessive_collisions);
10216 ESTAT_ADD(tx_late_collisions);
10217 ESTAT_ADD(tx_collide_2times);
10218 ESTAT_ADD(tx_collide_3times);
10219 ESTAT_ADD(tx_collide_4times);
10220 ESTAT_ADD(tx_collide_5times);
10221 ESTAT_ADD(tx_collide_6times);
10222 ESTAT_ADD(tx_collide_7times);
10223 ESTAT_ADD(tx_collide_8times);
10224 ESTAT_ADD(tx_collide_9times);
10225 ESTAT_ADD(tx_collide_10times);
10226 ESTAT_ADD(tx_collide_11times);
10227 ESTAT_ADD(tx_collide_12times);
10228 ESTAT_ADD(tx_collide_13times);
10229 ESTAT_ADD(tx_collide_14times);
10230 ESTAT_ADD(tx_collide_15times);
10231 ESTAT_ADD(tx_ucast_packets);
10232 ESTAT_ADD(tx_mcast_packets);
10233 ESTAT_ADD(tx_bcast_packets);
10234 ESTAT_ADD(tx_carrier_sense_errors);
10235 ESTAT_ADD(tx_discards);
10236 ESTAT_ADD(tx_errors);
10237
10238 ESTAT_ADD(dma_writeq_full);
10239 ESTAT_ADD(dma_write_prioq_full);
10240 ESTAT_ADD(rxbds_empty);
10241 ESTAT_ADD(rx_discards);
10242 ESTAT_ADD(rx_errors);
10243 ESTAT_ADD(rx_threshold_hit);
10244
10245 ESTAT_ADD(dma_readq_full);
10246 ESTAT_ADD(dma_read_prioq_full);
10247 ESTAT_ADD(tx_comp_queue_full);
10248
10249 ESTAT_ADD(ring_set_send_prod_index);
10250 ESTAT_ADD(ring_status_update);
10251 ESTAT_ADD(nic_irqs);
10252 ESTAT_ADD(nic_avoided_irqs);
10253 ESTAT_ADD(nic_tx_threshold_hit);
10254
4452d099
MC
10255 ESTAT_ADD(mbuf_lwm_thresh_hit);
10256
1da177e4
LT
10257 return estats;
10258}
10259
511d2224
ED
10260static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
10261 struct rtnl_link_stats64 *stats)
1da177e4
LT
10262{
10263 struct tg3 *tp = netdev_priv(dev);
511d2224 10264 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
1da177e4
LT
10265 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10266
10267 if (!hw_stats)
10268 return old_stats;
10269
10270 stats->rx_packets = old_stats->rx_packets +
10271 get_stat64(&hw_stats->rx_ucast_packets) +
10272 get_stat64(&hw_stats->rx_mcast_packets) +
10273 get_stat64(&hw_stats->rx_bcast_packets);
6aa20a22 10274
1da177e4
LT
10275 stats->tx_packets = old_stats->tx_packets +
10276 get_stat64(&hw_stats->tx_ucast_packets) +
10277 get_stat64(&hw_stats->tx_mcast_packets) +
10278 get_stat64(&hw_stats->tx_bcast_packets);
10279
10280 stats->rx_bytes = old_stats->rx_bytes +
10281 get_stat64(&hw_stats->rx_octets);
10282 stats->tx_bytes = old_stats->tx_bytes +
10283 get_stat64(&hw_stats->tx_octets);
10284
10285 stats->rx_errors = old_stats->rx_errors +
4f63b877 10286 get_stat64(&hw_stats->rx_errors);
1da177e4
LT
10287 stats->tx_errors = old_stats->tx_errors +
10288 get_stat64(&hw_stats->tx_errors) +
10289 get_stat64(&hw_stats->tx_mac_errors) +
10290 get_stat64(&hw_stats->tx_carrier_sense_errors) +
10291 get_stat64(&hw_stats->tx_discards);
10292
10293 stats->multicast = old_stats->multicast +
10294 get_stat64(&hw_stats->rx_mcast_packets);
10295 stats->collisions = old_stats->collisions +
10296 get_stat64(&hw_stats->tx_collisions);
10297
10298 stats->rx_length_errors = old_stats->rx_length_errors +
10299 get_stat64(&hw_stats->rx_frame_too_long_errors) +
10300 get_stat64(&hw_stats->rx_undersize_packets);
10301
10302 stats->rx_over_errors = old_stats->rx_over_errors +
10303 get_stat64(&hw_stats->rxbds_empty);
10304 stats->rx_frame_errors = old_stats->rx_frame_errors +
10305 get_stat64(&hw_stats->rx_align_errors);
10306 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10307 get_stat64(&hw_stats->tx_discards);
10308 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10309 get_stat64(&hw_stats->tx_carrier_sense_errors);
10310
10311 stats->rx_crc_errors = old_stats->rx_crc_errors +
10312 calc_crc_errors(tp);
10313
4f63b877
JL
10314 stats->rx_missed_errors = old_stats->rx_missed_errors +
10315 get_stat64(&hw_stats->rx_discards);
10316
b0057c51 10317 stats->rx_dropped = tp->rx_dropped;
48855432 10318 stats->tx_dropped = tp->tx_dropped;
b0057c51 10319
1da177e4
LT
10320 return stats;
10321}
10322
1da177e4
LT
10323static int tg3_get_regs_len(struct net_device *dev)
10324{
97bd8e49 10325 return TG3_REG_BLK_SIZE;
1da177e4
LT
10326}
10327
10328static void tg3_get_regs(struct net_device *dev,
10329 struct ethtool_regs *regs, void *_p)
10330{
1da177e4 10331 struct tg3 *tp = netdev_priv(dev);
1da177e4
LT
10332
10333 regs->version = 0;
10334
97bd8e49 10335 memset(_p, 0, TG3_REG_BLK_SIZE);
1da177e4 10336
80096068 10337 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
bc1c7567
MC
10338 return;
10339
f47c11ee 10340 tg3_full_lock(tp, 0);
1da177e4 10341
97bd8e49 10342 tg3_dump_legacy_regs(tp, (u32 *)_p);
1da177e4 10343
f47c11ee 10344 tg3_full_unlock(tp);
1da177e4
LT
10345}
10346
10347static int tg3_get_eeprom_len(struct net_device *dev)
10348{
10349 struct tg3 *tp = netdev_priv(dev);
10350
10351 return tp->nvram_size;
10352}
10353
1da177e4
LT
10354static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10355{
10356 struct tg3 *tp = netdev_priv(dev);
10357 int ret;
10358 u8 *pd;
b9fc7dc5 10359 u32 i, offset, len, b_offset, b_count;
a9dc529d 10360 __be32 val;
1da177e4 10361
63c3a66f 10362 if (tg3_flag(tp, NO_NVRAM))
df259d8c
MC
10363 return -EINVAL;
10364
80096068 10365 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
bc1c7567
MC
10366 return -EAGAIN;
10367
1da177e4
LT
10368 offset = eeprom->offset;
10369 len = eeprom->len;
10370 eeprom->len = 0;
10371
10372 eeprom->magic = TG3_EEPROM_MAGIC;
10373
10374 if (offset & 3) {
10375 /* adjustments to start on required 4 byte boundary */
10376 b_offset = offset & 3;
10377 b_count = 4 - b_offset;
10378 if (b_count > len) {
10379 /* i.e. offset=1 len=2 */
10380 b_count = len;
10381 }
a9dc529d 10382 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
1da177e4
LT
10383 if (ret)
10384 return ret;
be98da6a 10385 memcpy(data, ((char *)&val) + b_offset, b_count);
1da177e4
LT
10386 len -= b_count;
10387 offset += b_count;
c6cdf436 10388 eeprom->len += b_count;
1da177e4
LT
10389 }
10390
25985edc 10391 /* read bytes up to the last 4 byte boundary */
1da177e4
LT
10392 pd = &data[eeprom->len];
10393 for (i = 0; i < (len - (len & 3)); i += 4) {
a9dc529d 10394 ret = tg3_nvram_read_be32(tp, offset + i, &val);
1da177e4
LT
10395 if (ret) {
10396 eeprom->len += i;
10397 return ret;
10398 }
1da177e4
LT
10399 memcpy(pd + i, &val, 4);
10400 }
10401 eeprom->len += i;
10402
10403 if (len & 3) {
10404 /* read last bytes not ending on 4 byte boundary */
10405 pd = &data[eeprom->len];
10406 b_count = len & 3;
10407 b_offset = offset + len - b_count;
a9dc529d 10408 ret = tg3_nvram_read_be32(tp, b_offset, &val);
1da177e4
LT
10409 if (ret)
10410 return ret;
b9fc7dc5 10411 memcpy(pd, &val, b_count);
1da177e4
LT
10412 eeprom->len += b_count;
10413 }
10414 return 0;
10415}
10416
1da177e4
LT
10417static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10418{
10419 struct tg3 *tp = netdev_priv(dev);
10420 int ret;
b9fc7dc5 10421 u32 offset, len, b_offset, odd_len;
1da177e4 10422 u8 *buf;
a9dc529d 10423 __be32 start, end;
1da177e4 10424
80096068 10425 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
bc1c7567
MC
10426 return -EAGAIN;
10427
63c3a66f 10428 if (tg3_flag(tp, NO_NVRAM) ||
df259d8c 10429 eeprom->magic != TG3_EEPROM_MAGIC)
1da177e4
LT
10430 return -EINVAL;
10431
10432 offset = eeprom->offset;
10433 len = eeprom->len;
10434
10435 if ((b_offset = (offset & 3))) {
10436 /* adjustments to start on required 4 byte boundary */
a9dc529d 10437 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
1da177e4
LT
10438 if (ret)
10439 return ret;
1da177e4
LT
10440 len += b_offset;
10441 offset &= ~3;
1c8594b4
MC
10442 if (len < 4)
10443 len = 4;
1da177e4
LT
10444 }
10445
10446 odd_len = 0;
1c8594b4 10447 if (len & 3) {
1da177e4
LT
10448 /* adjustments to end on required 4 byte boundary */
10449 odd_len = 1;
10450 len = (len + 3) & ~3;
a9dc529d 10451 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
1da177e4
LT
10452 if (ret)
10453 return ret;
1da177e4
LT
10454 }
10455
10456 buf = data;
10457 if (b_offset || odd_len) {
10458 buf = kmalloc(len, GFP_KERNEL);
ab0049b4 10459 if (!buf)
1da177e4
LT
10460 return -ENOMEM;
10461 if (b_offset)
10462 memcpy(buf, &start, 4);
10463 if (odd_len)
10464 memcpy(buf+len-4, &end, 4);
10465 memcpy(buf + b_offset, data, eeprom->len);
10466 }
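/*
 * Editorial example: for offset = 2, len = 4 the write is widened to
 * offset = 0, len = 8; "start" supplies bytes 0-1, "end" supplies
 * bytes 6-7, and the caller's four bytes land at b_offset = 2.
 */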
10467
10468 ret = tg3_nvram_write_block(tp, offset, len, buf);
10469
10470 if (buf != data)
10471 kfree(buf);
10472
10473 return ret;
10474}
10475
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_gset(phydev, cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				   SUPPORTED_100baseT_Full |
				   SUPPORTED_10baseT_Half |
				   SUPPORTED_10baseT_Full |
				   SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				cmd->advertising |= ADVERTISED_Pause;
			} else {
				cmd->advertising |= ADVERTISED_Pause |
						    ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			cmd->advertising |= ADVERTISED_Asym_Pause;
		}
	}
	if (netif_running(dev) && netif_carrier_ok(dev)) {
		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
		cmd->duplex = tp->link_config.active_duplex;
		cmd->lp_advertising = tp->link_config.rmt_adv;
		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
				cmd->eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->eth_tp_mdix = ETH_TP_MDI;
		}
	} else {
		ethtool_cmd_speed_set(cmd, SPEED_INVALID);
		cmd->duplex = DUPLEX_INVALID;
		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
	}
	cmd->phy_address = tp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}

static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_sset(phydev, cmd);
	}

	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    cmd->duplex != DUPLEX_FULL &&
	    cmd->duplex != DUPLEX_HALF)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		if (cmd->advertising & ~mask)
			return -EINVAL;

		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		cmd->advertising &= mask;
	} else {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_INVALID;
		tp->link_config.duplex = DUPLEX_INVALID;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->duplex;
	}

	tp->link_config.orig_speed = tp->link_config.speed;
	tp->link_config.orig_duplex = tp->link_config.duplex;
	tp->link_config.orig_autoneg = tp->link_config.autoneg;

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}

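/* Sketch of the autoneg validation above: "mask" is first built as the
 * full set of modes this PHY may advertise, and any requested bit
 * outside it is rejected with -EINVAL.  The mask is then narrowed to
 * the speed/duplex bits only, so Pause/TP/FIBRE bits are quietly
 * dropped from the stored advertising word (ADVERTISED_Autoneg is
 * re-added when the value is committed) rather than treated as errors.
 */
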
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
}

static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
		wol->supported = WAKE_MAGIC;
	else
		wol->supported = 0;
	wol->wolopts = 0;
	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
		wol->wolopts = WAKE_MAGIC;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);
	struct device *dp = &tp->pdev->dev;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
		return -EINVAL;

	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);

	spin_lock_bh(&tp->lock);
	if (device_may_wakeup(dp))
		tg3_flag_set(tp, WOL_ENABLE);
	else
		tg3_flag_clear(tp, WOL_ENABLE);
	spin_unlock_bh(&tp->lock);

	return 0;
}

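/* Only magic-packet wake is supported, so any other wolopts bit is
 * rejected up front.  The request is recorded in the driver model via
 * device_set_wakeup_enable() and then mirrored into the driver's own
 * WOL_ENABLE flag under tp->lock, keeping the two views consistent.
 */
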
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}

static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}

static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}

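/* BMCR is read twice above; the first result is deliberately discarded
 * (a dummy read, presumably to settle the MDIO interface) and the
 * restart decision is taken on the second, fresh value.  Autoneg is
 * restarted only if it is already enabled or the PHY is in parallel
 * detect; otherwise -EINVAL is returned.
 */
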
static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);

	ering->rx_max_pending = tp->rx_std_ring_mask;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
	else
		ering->rx_jumbo_max_pending = 0;

	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;

	ering->rx_pending = tp->rx_pending;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
	else
		ering->rx_jumbo_pending = 0;

	ering->tx_pending = tp->napi[0].tx_pending;
}

static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;

	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}

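/* The tx_pending floor above exists because one skb can occupy up to
 * MAX_SKB_FRAGS + 1 descriptors; a smaller ring could never accept a
 * maximally fragmented packet.  Devices with the TSO_BUG workaround
 * segment large sends in the driver and need roughly three times that
 * headroom, hence the stricter MAX_SKB_FRAGS * 3 bound.
 */
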
static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);

	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);

	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
		epause->rx_pause = 1;
	else
		epause->rx_pause = 0;

	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
		epause->tx_pause = 1;
	else
		epause->tx_pause = 0;
}

static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			tp->link_config.orig_advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.orig_advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}

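/* The newadv mapping above follows the usual 802.3 pause-resolution
 * convention:
 *   rx+tx -> advertise symmetric Pause only,
 *   rx    -> advertise Pause | Asym_Pause (can receive, prefer both),
 *   tx    -> advertise Asym_Pause only (pause the peer, not ourselves),
 *   none  -> advertise neither.
 */
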
static int tg3_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return TG3_NUM_TEST;
	case ETH_SS_STATS:
		return TG3_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}

static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
			 u32 *rules __always_unused)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return -EOPNOTSUPP;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		if (netif_running(tp->dev))
			info->data = tp->irq_cnt;
		else {
			info->data = num_online_cpus();
			if (info->data > TG3_IRQ_MAX_VECS_RSS)
				info->data = TG3_IRQ_MAX_VECS_RSS;
		}

		/* The first interrupt vector only
		 * handles link interrupts.
		 */
		info->data -= 1;
		return 0;

	default:
		return -EOPNOTSUPP;
	}
}

static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
{
	u32 size = 0;
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, SUPPORT_MSIX))
		size = TG3_RSS_INDIR_TBL_SIZE;

	return size;
}

static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		indir[i] = tp->rss_ind_tbl[i];

	return 0;
}

static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
{
	struct tg3 *tp = netdev_priv(dev);
	size_t i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = indir[i];

	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
		return 0;

	/* It is legal to write the indirection
	 * table while the device is running.
	 */
	tg3_full_lock(tp, 0);
	tg3_rss_write_indir_tbl(tp);
	tg3_full_unlock(tp);

	return 0;
}

static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	case ETH_SS_TEST:
		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
		break;
	default:
		WARN_ON(1);	/* we need a WARN() */
		break;
	}
}

static int tg3_set_phys_id(struct net_device *dev,
			   enum ethtool_phys_id_state state)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(tp->dev))
		return -EAGAIN;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_1000MBPS_ON |
		     LED_CTRL_100MBPS_ON |
		     LED_CTRL_10MBPS_ON |
		     LED_CTRL_TRAFFIC_OVERRIDE |
		     LED_CTRL_TRAFFIC_BLINK |
		     LED_CTRL_TRAFFIC_LED);
		break;

	case ETHTOOL_ID_OFF:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_TRAFFIC_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		tw32(MAC_LED_CTRL, tp->led_ctrl);
		break;
	}

	return 0;
}

static void tg3_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->hw_stats)
		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
	else
		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
}

static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	if (!offset || !len) {
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}

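/* Lookup order above: images carrying the standard tg3 magic are
 * scanned for an extended-VPD entry in the NVM directory and, failing
 * that, fall back to the fixed VPD window at TG3_NVM_VPD_OFF; images
 * without the magic are read through the PCI VPD capability instead
 * (pci_read_vpd), retrying up to three times after -ETIMEDOUT/-EINTR.
 */
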
#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
#define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
#define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
#define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c

static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic, len;
	__be32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				return -EIO;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes. */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			} else if (i == 16) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	buf = tg3_vpd_readblock(tp, &len);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}

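/* tg3_test_nvram() applies a different integrity check per image type:
 * selfboot firmware images must byte-sum (mod 256) to zero, the
 * hardware selfboot format stores parity bits chosen so each data byte
 * plus its parity bit has odd overall parity, and standard images carry
 * CRCs over the bootstrap header (checksum at 0x10) and manufacturing
 * block (checksum at 0xfc), plus an optional VPD checksum keyword whose
 * covered bytes must also sum to zero.
 */
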
#define TG3_SERDES_TIMEOUT_SEC	2
#define TG3_COPPER_TIMEOUT_SEC	6

static int tg3_test_link(struct tg3 *tp)
{
	int i, max;

	if (!netif_running(tp->dev))
		return -ENODEV;

	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
		max = TG3_SERDES_TIMEOUT_SEC;
	else
		max = TG3_COPPER_TIMEOUT_SEC;

	for (i = 0; i < max; i++) {
		if (netif_carrier_ok(tp->dev))
			return 0;

		if (msleep_interruptible(1000))
			break;
	}

	return -EIO;
}

/* Only test the commonly used registers */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}

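/* Each table entry names a register plus two masks: read_mask covers
 * bits that must hold their value (read-only), write_mask covers bits
 * that must take whatever is written.  Writing all-zeros and then
 * all-ones under those masks catches both stuck-at-0 and stuck-at-1
 * faults, and the original register value is restored afterwards.
 */
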
static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
	int i;
	u32 j;

	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
		for (j = 0; j < len; j += 4) {
			u32 val;

			tg3_write_mem(tp, offset + j, test_pattern[i]);
			tg3_read_mem(tp, offset + j, &val);
			if (val != test_pattern[i])
				return -EIO;
		}
	}
	return 0;
}

static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5717[] = {
		{ 0x00000200, 0x00008},
		{ 0x00010000, 0x0a000},
		{ 0x00020000, 0x13c00},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_57765[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x09800},
		{ 0x00010000, 0x0a000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	if (tg3_flag(tp, 5717_PLUS))
		mem_tbl = mem_tbl_5717;
	else if (tg3_flag(tp, 57765_CLASS))
		mem_tbl = mem_tbl_57765;
	else if (tg3_flag(tp, 5755_PLUS))
		mem_tbl = mem_tbl_5755;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tg3_flag(tp, 5705_PLUS))
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;
	}

	return err;
}

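/* The pattern set is the classic all-zeros / all-ones / alternating
 * 0xaa55a55a sweep: together the three writes toggle every bit of each
 * 32-bit word in both directions, which flushes out stuck bits in the
 * on-chip SRAM regions listed in the per-ASIC tables above.
 */
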
#define TG3_TSO_MSS		500

#define TG3_TSO_IP_HDR_LEN	20
#define TG3_TSO_TCP_HDR_LEN	20
#define TG3_TSO_TCP_OPT_LEN	12

static const u8 tg3_tso_header[] = {
0x08, 0x00,
0x45, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x00,
0x40, 0x06, 0x00, 0x00,
0x0a, 0x00, 0x00, 0x01,
0x0a, 0x00, 0x00, 0x02,
0x0d, 0x00, 0xe0, 0x00,
0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x02, 0x00,
0x80, 0x10, 0x10, 0x00,
0x14, 0x09, 0x00, 0x00,
0x01, 0x01, 0x08, 0x0a,
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};

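/* Rough decode of the template above, read as a packet: the two leading
 * bytes are the IPv4 ethertype (0x0800), followed by a 20-byte IP
 * header (src 10.0.0.1, dst 10.0.0.2, protocol TCP) and a 32-byte TCP
 * header whose 12 option bytes are two NOPs plus a timestamp option,
 * matching TG3_TSO_TCP_OPT_LEN.  The IP total-length field is left zero
 * and patched in by tg3_run_loopback(), which also zeroes the TCP
 * checksum when the hardware is expected to compute it.
 */
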
static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
{
	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
	u32 budget;
	struct sk_buff *skb;
	u8 *tx_data, *rx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;
	struct tg3_napi *tnapi, *rnapi;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tnapi = &tp->napi[0];
	rnapi = &tp->napi[0];
	if (tp->irq_cnt > 1) {
		if (tg3_flag(tp, ENABLE_RSS))
			rnapi = &tp->napi[1];
		if (tg3_flag(tp, ENABLE_TSS))
			tnapi = &tp->napi[1];
	}
	coal_now = tnapi->coal_now | rnapi->coal_now;

	err = -EIO;

	tx_len = pktsz;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

	if (tso_loopback) {
		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
			      TG3_TSO_TCP_OPT_LEN;

		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
		       sizeof(tg3_tso_header));
		mss = TG3_TSO_MSS;

		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);

		/* Set the total length field in the IP header */
		iph->tot_len = htons((u16)(mss + hdr_len));

		base_flags = (TXD_FLAG_CPU_PRE_DMA |
			      TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			struct tcphdr *th;
			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
			th = (struct tcphdr *)&tx_data[val];
			th->check = 0;
		} else
			base_flags |= TXD_FLAG_TCPUDP_CSUM;

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
		} else {
			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
		}

		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
	} else {
		num_pkts = 1;
		data_off = ETH_HLEN;
	}

	for (i = data_off; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	val = tnapi->tx_prod;
	tnapi->tx_buffers[val].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       rnapi->coal_now);

	udelay(10);

	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

	budget = tg3_tx_avail(tnapi);
	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
			    base_flags | TXD_FLAG_END, mss, 0)) {
		tnapi->tx_buffers[val].skb = NULL;
		dev_kfree_skb(skb);
		return -EIO;
	}

	tnapi->tx_prod++;

	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
	tr32_mailbox(tnapi->prodmbox);

	udelay(10);

	/* 350 usec to allow enough time on some 10/100 Mbps devices. */
	for (i = 0; i < 35; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       coal_now);

		udelay(10);

		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
		rx_idx = rnapi->hw_status->idx[0].rx_producer;
		if ((tx_idx == tnapi->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
	dev_kfree_skb(skb);

	if (tx_idx != tnapi->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	val = data_off;
	while (rx_idx != rx_start_idx) {
		desc = &rnapi->rx_rcb[rx_start_idx++];
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
			goto out;

		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
			 - ETH_FCS_LEN;

		if (!tso_loopback) {
			if (rx_len != tx_len)
				goto out;

			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
				if (opaque_key != RXD_OPAQUE_RING_STD)
					goto out;
			} else {
				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
					goto out;
			}
		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
			goto out;
		}

		if (opaque_key == RXD_OPAQUE_RING_STD) {
			rx_data = tpr->rx_std_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
					     mapping);
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
					     mapping);
		} else
			goto out;

		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
					    PCI_DMA_FROMDEVICE);

		rx_data += TG3_RX_OFFSET(tp);
		for (i = data_off; i < rx_len; i++, val++) {
			if (*(rx_data + i) != (u8) (val & 0xff))
				goto out;
		}
	}

	err = 0;

	/* tg3_free_rings will unmap and free the rx_data */
out:
	return err;
}

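/* Flow of the loopback run above: one frame (or one TSO super-frame) is
 * queued on the send ring, the coalescing engine is kicked, and the
 * status block is polled for up to roughly 350 usec until the tx
 * consumer and rx producer indices both advance.  Every received
 * payload byte is then compared against the incrementing pattern
 * written at transmit time, so corruption anywhere in the MAC/PHY path
 * is detected.
 */
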
#define TG3_STD_LOOPBACK_FAILED		1
#define TG3_JMB_LOOPBACK_FAILED		2
#define TG3_TSO_LOOPBACK_FAILED		4
#define TG3_LOOPBACK_FAILED \
	(TG3_STD_LOOPBACK_FAILED | \
	 TG3_JMB_LOOPBACK_FAILED | \
	 TG3_TSO_LOOPBACK_FAILED)

static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;

	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	if (!netif_running(tp->dev)) {
		data[0] = TG3_LOOPBACK_FAILED;
		data[1] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[2] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	err = tg3_reset_hw(tp, 1);
	if (err) {
		data[0] = TG3_LOOPBACK_FAILED;
		data[1] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[2] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* HW errata - mac loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by
	 * errata.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT)) {
		tg3_mac_loopback(tp, true);

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[0] |= TG3_STD_LOOPBACK_FAILED;

		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
			data[0] |= TG3_JMB_LOOPBACK_FAILED;

		tg3_mac_loopback(tp, false);
	}

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[1] |= TG3_STD_LOOPBACK_FAILED;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
			data[1] |= TG3_TSO_LOOPBACK_FAILED;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
			data[1] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec.  Double it
			 * to be sure.
			 */
			mdelay(40);

			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
				data[2] |= TG3_STD_LOOPBACK_FAILED;
			if (tg3_flag(tp, TSO_CAPABLE) &&
			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
				data[2] |= TG3_TSO_LOOPBACK_FAILED;
			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
			    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
				data[2] |= TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}

	err = (data[0] | data[1] | data[2]) ? -EIO : 0;

done:
	tp->phy_flags |= eee_cap;

	return err;
}

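/* Result layout: data[0] holds MAC-loopback failures, data[1] internal
 * PHY loopback, data[2] external loopback (only when requested), each a
 * bitmask of the TG3_*_LOOPBACK_FAILED flags.  The EEE capability is
 * masked off for the duration of the test, presumably so a link that
 * drops into low-power idle cannot stall the loopback frames.
 */
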
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
	    tg3_power_up(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
		return;
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		if (tg3_test_loopback(tp, &data[4], doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[7] = 1;
		}

		tg3_full_lock(tp, 0);

		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down(tp);

}

static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}

static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);

	memcpy(ec, &tp->coal, sizeof(*ec));
	return 0;
}

static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

	if (!tg3_flag(tp, 5705_PLUS)) {
		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	}

	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;

	/* No rx interrupts will be generated if both are zero */
	if ((ec->rx_coalesce_usecs == 0) &&
	    (ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	/* No tx interrupts will be generated if both are zero */
	if ((ec->tx_coalesce_usecs == 0) &&
	    (ec->tx_max_coalesced_frames == 0))
		return -EINVAL;

	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}
	return 0;
}

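/* An interrupt fires when either the usec timer or the frame-count
 * threshold of a direction is reached, so zeroing both would silence
 * that direction entirely; the two -EINVAL checks above forbid exactly
 * that combination.  On 5705-and-newer parts the _irq and stats maxima
 * stay 0, so any nonzero request for those fields is rejected there.
 */
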
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size	= tg3_get_rxfh_indir_size,
	.get_rxfh_indir		= tg3_get_rxfh_indir,
	.set_rxfh_indir		= tg3_set_rxfh_indir,
};

static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}

static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tg3_flag(tp, 5780_CLASS)) {
			netdev_update_features(dev);
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else {
			tg3_flag_set(tp, JUMBO_RING_ENABLE);
		}
	} else {
		if (tg3_flag(tp, 5780_CLASS)) {
			tg3_flag_set(tp, TSO_CAPABLE);
			netdev_update_features(dev);
		}
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
	}
}

static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	err = tg3_restart_hw(tp, 0);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}

static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};

1da177e4
LT
12325static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12326{
1b27777a 12327 u32 cursize, val, magic;
1da177e4
LT
12328
12329 tp->nvram_size = EEPROM_CHIP_SIZE;
12330
e4f34110 12331 if (tg3_nvram_read(tp, 0, &magic) != 0)
1da177e4
LT
12332 return;
12333
b16250e3
MC
12334 if ((magic != TG3_EEPROM_MAGIC) &&
12335 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12336 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
1da177e4
LT
12337 return;
12338
12339 /*
12340 * Size the chip by reading offsets at increasing powers of two.
12341 * When we encounter our validation signature, we know the addressing
12342 * has wrapped around, and thus have our chip size.
12343 */
1b27777a 12344 cursize = 0x10;
1da177e4
LT
12345
12346 while (cursize < tp->nvram_size) {
e4f34110 12347 if (tg3_nvram_read(tp, cursize, &val) != 0)
1da177e4
LT
12348 return;
12349
1820180b 12350 if (val == magic)
1da177e4
LT
12351 break;
12352
12353 cursize <<= 1;
12354 }
12355
12356 tp->nvram_size = cursize;
12357}
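
The sizing loop above relies on a property of these parts: reads past the end of the device wrap back to offset 0, so the first probe offset that returns the magic signature again marks the chip size. Below is a minimal standalone sketch of the same probe, with a hypothetical read callback standing in for tg3_nvram_read() and an invented 8 KB part; it is an illustration, not driver code.

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical NVRAM reader: models an 8 KB part whose addressing
	 * wraps, so offsets 0, 8192, 16384, ... all return the signature.
	 */
	static int fake_nvram_read(uint32_t off, uint32_t *val)
	{
		*val = (off % 8192) == 0 ? 0x669955aa : 0;
		return 0;
	}

	int main(void)
	{
		uint32_t magic, val, cursize = 0x10;

		fake_nvram_read(0, &magic);
		/* Double the probe offset until the signature reappears; at
		 * that point the addressing has wrapped and cursize is the
		 * device size.
		 */
		while (cursize < 0x20000) {
			if (fake_nvram_read(cursize, &val) != 0)
				return 1;
			if (val == magic)
				break;
			cursize <<= 1;
		}
		printf("detected size: %u bytes\n", (unsigned)cursize); /* 8192 */
		return 0;
	}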
6aa20a22 12358
1da177e4
LT
12359static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12360{
12361 u32 val;
12362
63c3a66f 12363 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
1b27777a
MC
12364 return;
12365
12366 /* Selfboot format */
1820180b 12367 if (val != TG3_EEPROM_MAGIC) {
1b27777a
MC
12368 tg3_get_eeprom_size(tp);
12369 return;
12370 }
12371
6d348f2c 12372 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
1da177e4 12373 if (val != 0) {
6d348f2c
MC
12374 /* This is confusing. We want to operate on the
12375 * 16-bit value at offset 0xf2. The tg3_nvram_read()
12376 * call will read from NVRAM and byteswap the data
12377 * according to the byteswapping settings for all
12378 * other register accesses. This ensures the data we
12379 * want will always reside in the lower 16-bits.
12380 * However, the data in NVRAM is in LE format, which
12381 * means the data from the NVRAM read will always be
12382 * opposite the endianness of the CPU. The 16-bit
12383 * byteswap then brings the data to CPU endianness.
12384 */
12385 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
1da177e4
LT
12386 return;
12387 }
12388 }
fd1122a2 12389 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
1da177e4
LT
12390}
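
The byteswap reasoning in the comment above is easier to follow with concrete bytes. A hedged standalone example: assume the NVRAM stores a size-in-KB field of 512 (little-endian 0x0200) and, as the comment describes, the value reaching the driver is opposite the CPU's endianness, so the CPU sees 0x0002 in the low 16 bits. The 16-bit swap then recovers 512 regardless of host byte order.

	#include <stdio.h>

	/* Same operation as the kernel's swab16(). */
	static unsigned short swab16(unsigned short x)
	{
		return (unsigned short)((x << 8) | (x >> 8));
	}

	int main(void)
	{
		unsigned int val = 0x00000002;	/* hypothetical tg3_nvram_read() result */
		unsigned int size = swab16((unsigned short)(val & 0xffff)) * 1024;

		/* Prints "0x0002 -> 512 KB (524288 bytes)". */
		printf("0x%04x -> %u KB (%u bytes)\n", val & 0xffff, size / 1024, size);
		return 0;
	}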
12391
12392static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12393{
12394 u32 nvcfg1;
12395
12396 nvcfg1 = tr32(NVRAM_CFG1);
12397 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
63c3a66f 12398 tg3_flag_set(tp, FLASH);
8590a603 12399 } else {
1da177e4
LT
12400 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12401 tw32(NVRAM_CFG1, nvcfg1);
12402 }
12403
6ff6f81d 12404 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
63c3a66f 12405 tg3_flag(tp, 5780_CLASS)) {
1da177e4 12406 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
8590a603
MC
12407 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12408 tp->nvram_jedecnum = JEDEC_ATMEL;
12409 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
63c3a66f 12410 tg3_flag_set(tp, NVRAM_BUFFERED);
8590a603
MC
12411 break;
12412 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12413 tp->nvram_jedecnum = JEDEC_ATMEL;
12414 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12415 break;
12416 case FLASH_VENDOR_ATMEL_EEPROM:
12417 tp->nvram_jedecnum = JEDEC_ATMEL;
12418 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
63c3a66f 12419 tg3_flag_set(tp, NVRAM_BUFFERED);
8590a603
MC
12420 break;
12421 case FLASH_VENDOR_ST:
12422 tp->nvram_jedecnum = JEDEC_ST;
12423 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
63c3a66f 12424 tg3_flag_set(tp, NVRAM_BUFFERED);
8590a603
MC
12425 break;
12426 case FLASH_VENDOR_SAIFUN:
12427 tp->nvram_jedecnum = JEDEC_SAIFUN;
12428 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12429 break;
12430 case FLASH_VENDOR_SST_SMALL:
12431 case FLASH_VENDOR_SST_LARGE:
12432 tp->nvram_jedecnum = JEDEC_SST;
12433 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12434 break;
1da177e4 12435 }
8590a603 12436 } else {
1da177e4
LT
12437 tp->nvram_jedecnum = JEDEC_ATMEL;
12438 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
63c3a66f 12439 tg3_flag_set(tp, NVRAM_BUFFERED);
1da177e4
LT
12440 }
12441}
12442
a1b950d5
MC
12443static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12444{
12445 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12446 case FLASH_5752PAGE_SIZE_256:
12447 tp->nvram_pagesize = 256;
12448 break;
12449 case FLASH_5752PAGE_SIZE_512:
12450 tp->nvram_pagesize = 512;
12451 break;
12452 case FLASH_5752PAGE_SIZE_1K:
12453 tp->nvram_pagesize = 1024;
12454 break;
12455 case FLASH_5752PAGE_SIZE_2K:
12456 tp->nvram_pagesize = 2048;
12457 break;
12458 case FLASH_5752PAGE_SIZE_4K:
12459 tp->nvram_pagesize = 4096;
12460 break;
12461 case FLASH_5752PAGE_SIZE_264:
12462 tp->nvram_pagesize = 264;
12463 break;
12464 case FLASH_5752PAGE_SIZE_528:
12465 tp->nvram_pagesize = 528;
12466 break;
12467 }
12468}
12469
361b4ac2
MC
12470static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12471{
12472 u32 nvcfg1;
12473
12474 nvcfg1 = tr32(NVRAM_CFG1);
12475
e6af301b
MC
12476 /* NVRAM protection for TPM */
12477 if (nvcfg1 & (1 << 27))
63c3a66f 12478 tg3_flag_set(tp, PROTECTED_NVRAM);
e6af301b 12479
361b4ac2 12480 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8590a603
MC
12481 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12482 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12483 tp->nvram_jedecnum = JEDEC_ATMEL;
63c3a66f 12484 tg3_flag_set(tp, NVRAM_BUFFERED);
8590a603
MC
12485 break;
12486 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12487 tp->nvram_jedecnum = JEDEC_ATMEL;
63c3a66f
JP
12488 tg3_flag_set(tp, NVRAM_BUFFERED);
12489 tg3_flag_set(tp, FLASH);
8590a603
MC
12490 break;
12491 case FLASH_5752VENDOR_ST_M45PE10:
12492 case FLASH_5752VENDOR_ST_M45PE20:
12493 case FLASH_5752VENDOR_ST_M45PE40:
12494 tp->nvram_jedecnum = JEDEC_ST;
63c3a66f
JP
12495 tg3_flag_set(tp, NVRAM_BUFFERED);
12496 tg3_flag_set(tp, FLASH);
8590a603 12497 break;
361b4ac2
MC
12498 }
12499
63c3a66f 12500 if (tg3_flag(tp, FLASH)) {
a1b950d5 12501 tg3_nvram_get_pagesize(tp, nvcfg1);
8590a603 12502 } else {
361b4ac2
MC
12503 /* For eeprom, set pagesize to maximum eeprom size */
12504 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12505
12506 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12507 tw32(NVRAM_CFG1, nvcfg1);
12508 }
12509}
12510
d3c7b886
MC
12511static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12512{
989a9d23 12513 u32 nvcfg1, protect = 0;
d3c7b886
MC
12514
12515 nvcfg1 = tr32(NVRAM_CFG1);
12516
12517 /* NVRAM protection for TPM */
989a9d23 12518 if (nvcfg1 & (1 << 27)) {
63c3a66f 12519 tg3_flag_set(tp, PROTECTED_NVRAM);
989a9d23
MC
12520 protect = 1;
12521 }
d3c7b886 12522
989a9d23
MC
12523 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12524 switch (nvcfg1) {
8590a603
MC
12525 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12526 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12527 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12528 case FLASH_5755VENDOR_ATMEL_FLASH_5:
12529 tp->nvram_jedecnum = JEDEC_ATMEL;
63c3a66f
JP
12530 tg3_flag_set(tp, NVRAM_BUFFERED);
12531 tg3_flag_set(tp, FLASH);
8590a603
MC
12532 tp->nvram_pagesize = 264;
12533 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12534 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12535 tp->nvram_size = (protect ? 0x3e200 :
12536 TG3_NVRAM_SIZE_512KB);
12537 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12538 tp->nvram_size = (protect ? 0x1f200 :
12539 TG3_NVRAM_SIZE_256KB);
12540 else
12541 tp->nvram_size = (protect ? 0x1f200 :
12542 TG3_NVRAM_SIZE_128KB);
12543 break;
12544 case FLASH_5752VENDOR_ST_M45PE10:
12545 case FLASH_5752VENDOR_ST_M45PE20:
12546 case FLASH_5752VENDOR_ST_M45PE40:
12547 tp->nvram_jedecnum = JEDEC_ST;
63c3a66f
JP
12548 tg3_flag_set(tp, NVRAM_BUFFERED);
12549 tg3_flag_set(tp, FLASH);
8590a603
MC
12550 tp->nvram_pagesize = 256;
12551 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12552 tp->nvram_size = (protect ?
12553 TG3_NVRAM_SIZE_64KB :
12554 TG3_NVRAM_SIZE_128KB);
12555 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12556 tp->nvram_size = (protect ?
12557 TG3_NVRAM_SIZE_64KB :
12558 TG3_NVRAM_SIZE_256KB);
12559 else
12560 tp->nvram_size = (protect ?
12561 TG3_NVRAM_SIZE_128KB :
12562 TG3_NVRAM_SIZE_512KB);
12563 break;
d3c7b886
MC
12564 }
12565}
12566
1b27777a
MC
12567static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12568{
12569 u32 nvcfg1;
12570
12571 nvcfg1 = tr32(NVRAM_CFG1);
12572
12573 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8590a603
MC
12574 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12575 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12576 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12577 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12578 tp->nvram_jedecnum = JEDEC_ATMEL;
63c3a66f 12579 tg3_flag_set(tp, NVRAM_BUFFERED);
8590a603 12580 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
1b27777a 12581
8590a603
MC
12582 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12583 tw32(NVRAM_CFG1, nvcfg1);
12584 break;
12585 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12586 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12587 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12588 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12589 tp->nvram_jedecnum = JEDEC_ATMEL;
63c3a66f
JP
12590 tg3_flag_set(tp, NVRAM_BUFFERED);
12591 tg3_flag_set(tp, FLASH);
8590a603
MC
12592 tp->nvram_pagesize = 264;
12593 break;
12594 case FLASH_5752VENDOR_ST_M45PE10:
12595 case FLASH_5752VENDOR_ST_M45PE20:
12596 case FLASH_5752VENDOR_ST_M45PE40:
12597 tp->nvram_jedecnum = JEDEC_ST;
63c3a66f
JP
12598 tg3_flag_set(tp, NVRAM_BUFFERED);
12599 tg3_flag_set(tp, FLASH);
8590a603
MC
12600 tp->nvram_pagesize = 256;
12601 break;
1b27777a
MC
12602 }
12603}
12604
6b91fa02
MC
12605static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12606{
12607 u32 nvcfg1, protect = 0;
12608
12609 nvcfg1 = tr32(NVRAM_CFG1);
12610
12611 /* NVRAM protection for TPM */
12612 if (nvcfg1 & (1 << 27)) {
63c3a66f 12613 tg3_flag_set(tp, PROTECTED_NVRAM);
6b91fa02
MC
12614 protect = 1;
12615 }
12616
12617 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12618 switch (nvcfg1) {
8590a603
MC
12619 case FLASH_5761VENDOR_ATMEL_ADB021D:
12620 case FLASH_5761VENDOR_ATMEL_ADB041D:
12621 case FLASH_5761VENDOR_ATMEL_ADB081D:
12622 case FLASH_5761VENDOR_ATMEL_ADB161D:
12623 case FLASH_5761VENDOR_ATMEL_MDB021D:
12624 case FLASH_5761VENDOR_ATMEL_MDB041D:
12625 case FLASH_5761VENDOR_ATMEL_MDB081D:
12626 case FLASH_5761VENDOR_ATMEL_MDB161D:
12627 tp->nvram_jedecnum = JEDEC_ATMEL;
63c3a66f
JP
12628 tg3_flag_set(tp, NVRAM_BUFFERED);
12629 tg3_flag_set(tp, FLASH);
12630 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
8590a603
MC
12631 tp->nvram_pagesize = 256;
12632 break;
12633 case FLASH_5761VENDOR_ST_A_M45PE20:
12634 case FLASH_5761VENDOR_ST_A_M45PE40:
12635 case FLASH_5761VENDOR_ST_A_M45PE80:
12636 case FLASH_5761VENDOR_ST_A_M45PE16:
12637 case FLASH_5761VENDOR_ST_M_M45PE20:
12638 case FLASH_5761VENDOR_ST_M_M45PE40:
12639 case FLASH_5761VENDOR_ST_M_M45PE80:
12640 case FLASH_5761VENDOR_ST_M_M45PE16:
12641 tp->nvram_jedecnum = JEDEC_ST;
63c3a66f
JP
12642 tg3_flag_set(tp, NVRAM_BUFFERED);
12643 tg3_flag_set(tp, FLASH);
8590a603
MC
12644 tp->nvram_pagesize = 256;
12645 break;
6b91fa02
MC
12646 }
12647
12648 if (protect) {
12649 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12650 } else {
12651 switch (nvcfg1) {
8590a603
MC
12652 case FLASH_5761VENDOR_ATMEL_ADB161D:
12653 case FLASH_5761VENDOR_ATMEL_MDB161D:
12654 case FLASH_5761VENDOR_ST_A_M45PE16:
12655 case FLASH_5761VENDOR_ST_M_M45PE16:
12656 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12657 break;
12658 case FLASH_5761VENDOR_ATMEL_ADB081D:
12659 case FLASH_5761VENDOR_ATMEL_MDB081D:
12660 case FLASH_5761VENDOR_ST_A_M45PE80:
12661 case FLASH_5761VENDOR_ST_M_M45PE80:
12662 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12663 break;
12664 case FLASH_5761VENDOR_ATMEL_ADB041D:
12665 case FLASH_5761VENDOR_ATMEL_MDB041D:
12666 case FLASH_5761VENDOR_ST_A_M45PE40:
12667 case FLASH_5761VENDOR_ST_M_M45PE40:
12668 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12669 break;
12670 case FLASH_5761VENDOR_ATMEL_ADB021D:
12671 case FLASH_5761VENDOR_ATMEL_MDB021D:
12672 case FLASH_5761VENDOR_ST_A_M45PE20:
12673 case FLASH_5761VENDOR_ST_M_M45PE20:
12674 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12675 break;
6b91fa02
MC
12676 }
12677 }
12678}
12679
b5d3772c
MC
12680static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12681{
12682 tp->nvram_jedecnum = JEDEC_ATMEL;
63c3a66f 12683 tg3_flag_set(tp, NVRAM_BUFFERED);
b5d3772c
MC
12684 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12685}
12686
321d32a0
MC
12687static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12688{
12689 u32 nvcfg1;
12690
12691 nvcfg1 = tr32(NVRAM_CFG1);
12692
12693 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12694 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12695 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12696 tp->nvram_jedecnum = JEDEC_ATMEL;
63c3a66f 12697 tg3_flag_set(tp, NVRAM_BUFFERED);
321d32a0
MC
12698 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12699
12700 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12701 tw32(NVRAM_CFG1, nvcfg1);
12702 return;
12703 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12704 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12705 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12706 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12707 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12708 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12709 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12710 tp->nvram_jedecnum = JEDEC_ATMEL;
63c3a66f
JP
12711 tg3_flag_set(tp, NVRAM_BUFFERED);
12712 tg3_flag_set(tp, FLASH);
321d32a0
MC
12713
12714 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12715 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12716 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12717 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12718 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12719 break;
12720 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12721 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12722 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12723 break;
12724 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12725 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12726 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12727 break;
12728 }
12729 break;
12730 case FLASH_5752VENDOR_ST_M45PE10:
12731 case FLASH_5752VENDOR_ST_M45PE20:
12732 case FLASH_5752VENDOR_ST_M45PE40:
12733 tp->nvram_jedecnum = JEDEC_ST;
63c3a66f
JP
12734 tg3_flag_set(tp, NVRAM_BUFFERED);
12735 tg3_flag_set(tp, FLASH);
321d32a0
MC
12736
12737 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12738 case FLASH_5752VENDOR_ST_M45PE10:
12739 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12740 break;
12741 case FLASH_5752VENDOR_ST_M45PE20:
12742 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12743 break;
12744 case FLASH_5752VENDOR_ST_M45PE40:
12745 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12746 break;
12747 }
12748 break;
12749 default:
63c3a66f 12750 tg3_flag_set(tp, NO_NVRAM);
321d32a0
MC
12751 return;
12752 }
12753
a1b950d5
MC
12754 tg3_nvram_get_pagesize(tp, nvcfg1);
12755 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
63c3a66f 12756 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
a1b950d5
MC
12757}
12758
12759
12760static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12761{
12762 u32 nvcfg1;
12763
12764 nvcfg1 = tr32(NVRAM_CFG1);
12765
12766 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12767 case FLASH_5717VENDOR_ATMEL_EEPROM:
12768 case FLASH_5717VENDOR_MICRO_EEPROM:
12769 tp->nvram_jedecnum = JEDEC_ATMEL;
63c3a66f 12770 tg3_flag_set(tp, NVRAM_BUFFERED);
a1b950d5
MC
12771 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12772
12773 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12774 tw32(NVRAM_CFG1, nvcfg1);
12775 return;
12776 case FLASH_5717VENDOR_ATMEL_MDB011D:
12777 case FLASH_5717VENDOR_ATMEL_ADB011B:
12778 case FLASH_5717VENDOR_ATMEL_ADB011D:
12779 case FLASH_5717VENDOR_ATMEL_MDB021D:
12780 case FLASH_5717VENDOR_ATMEL_ADB021B:
12781 case FLASH_5717VENDOR_ATMEL_ADB021D:
12782 case FLASH_5717VENDOR_ATMEL_45USPT:
12783 tp->nvram_jedecnum = JEDEC_ATMEL;
63c3a66f
JP
12784 tg3_flag_set(tp, NVRAM_BUFFERED);
12785 tg3_flag_set(tp, FLASH);
a1b950d5
MC
12786
12787 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12788 case FLASH_5717VENDOR_ATMEL_MDB021D:
66ee33bf
MC
12789 /* Detect size with tg3_nvram_get_size() */
12790 break;
a1b950d5
MC
12791 case FLASH_5717VENDOR_ATMEL_ADB021B:
12792 case FLASH_5717VENDOR_ATMEL_ADB021D:
12793 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12794 break;
12795 default:
12796 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12797 break;
12798 }
321d32a0 12799 break;
a1b950d5
MC
12800 case FLASH_5717VENDOR_ST_M_M25PE10:
12801 case FLASH_5717VENDOR_ST_A_M25PE10:
12802 case FLASH_5717VENDOR_ST_M_M45PE10:
12803 case FLASH_5717VENDOR_ST_A_M45PE10:
12804 case FLASH_5717VENDOR_ST_M_M25PE20:
12805 case FLASH_5717VENDOR_ST_A_M25PE20:
12806 case FLASH_5717VENDOR_ST_M_M45PE20:
12807 case FLASH_5717VENDOR_ST_A_M45PE20:
12808 case FLASH_5717VENDOR_ST_25USPT:
12809 case FLASH_5717VENDOR_ST_45USPT:
12810 tp->nvram_jedecnum = JEDEC_ST;
63c3a66f
JP
12811 tg3_flag_set(tp, NVRAM_BUFFERED);
12812 tg3_flag_set(tp, FLASH);
a1b950d5
MC
12813
12814 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12815 case FLASH_5717VENDOR_ST_M_M25PE20:
a1b950d5 12816 case FLASH_5717VENDOR_ST_M_M45PE20:
66ee33bf
MC
12817 /* Detect size with tg3_nvram_get_size() */
12818 break;
12819 case FLASH_5717VENDOR_ST_A_M25PE20:
a1b950d5
MC
12820 case FLASH_5717VENDOR_ST_A_M45PE20:
12821 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12822 break;
12823 default:
12824 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12825 break;
12826 }
321d32a0 12827 break;
a1b950d5 12828 default:
63c3a66f 12829 tg3_flag_set(tp, NO_NVRAM);
a1b950d5 12830 return;
321d32a0 12831 }
a1b950d5
MC
12832
12833 tg3_nvram_get_pagesize(tp, nvcfg1);
12834 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
63c3a66f 12835 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
321d32a0
MC
12836}
12837
9b91b5f1
MC
12838static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12839{
12840 u32 nvcfg1, nvmpinstrp;
12841
12842 nvcfg1 = tr32(NVRAM_CFG1);
12843 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12844
12845 switch (nvmpinstrp) {
12846 case FLASH_5720_EEPROM_HD:
12847 case FLASH_5720_EEPROM_LD:
12848 tp->nvram_jedecnum = JEDEC_ATMEL;
63c3a66f 12849 tg3_flag_set(tp, NVRAM_BUFFERED);
9b91b5f1
MC
12850
12851 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12852 tw32(NVRAM_CFG1, nvcfg1);
12853 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12854 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12855 else
12856 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12857 return;
12858 case FLASH_5720VENDOR_M_ATMEL_DB011D:
12859 case FLASH_5720VENDOR_A_ATMEL_DB011B:
12860 case FLASH_5720VENDOR_A_ATMEL_DB011D:
12861 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12862 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12863 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12864 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12865 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12866 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12867 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12868 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12869 case FLASH_5720VENDOR_ATMEL_45USPT:
12870 tp->nvram_jedecnum = JEDEC_ATMEL;
63c3a66f
JP
12871 tg3_flag_set(tp, NVRAM_BUFFERED);
12872 tg3_flag_set(tp, FLASH);
9b91b5f1
MC
12873
12874 switch (nvmpinstrp) {
12875 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12876 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12877 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12878 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12879 break;
12880 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12881 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12882 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12883 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12884 break;
12885 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12886 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12887 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12888 break;
12889 default:
12890 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12891 break;
12892 }
12893 break;
12894 case FLASH_5720VENDOR_M_ST_M25PE10:
12895 case FLASH_5720VENDOR_M_ST_M45PE10:
12896 case FLASH_5720VENDOR_A_ST_M25PE10:
12897 case FLASH_5720VENDOR_A_ST_M45PE10:
12898 case FLASH_5720VENDOR_M_ST_M25PE20:
12899 case FLASH_5720VENDOR_M_ST_M45PE20:
12900 case FLASH_5720VENDOR_A_ST_M25PE20:
12901 case FLASH_5720VENDOR_A_ST_M45PE20:
12902 case FLASH_5720VENDOR_M_ST_M25PE40:
12903 case FLASH_5720VENDOR_M_ST_M45PE40:
12904 case FLASH_5720VENDOR_A_ST_M25PE40:
12905 case FLASH_5720VENDOR_A_ST_M45PE40:
12906 case FLASH_5720VENDOR_M_ST_M25PE80:
12907 case FLASH_5720VENDOR_M_ST_M45PE80:
12908 case FLASH_5720VENDOR_A_ST_M25PE80:
12909 case FLASH_5720VENDOR_A_ST_M45PE80:
12910 case FLASH_5720VENDOR_ST_25USPT:
12911 case FLASH_5720VENDOR_ST_45USPT:
12912 tp->nvram_jedecnum = JEDEC_ST;
63c3a66f
JP
12913 tg3_flag_set(tp, NVRAM_BUFFERED);
12914 tg3_flag_set(tp, FLASH);
9b91b5f1
MC
12915
12916 switch (nvmpinstrp) {
12917 case FLASH_5720VENDOR_M_ST_M25PE20:
12918 case FLASH_5720VENDOR_M_ST_M45PE20:
12919 case FLASH_5720VENDOR_A_ST_M25PE20:
12920 case FLASH_5720VENDOR_A_ST_M45PE20:
12921 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12922 break;
12923 case FLASH_5720VENDOR_M_ST_M25PE40:
12924 case FLASH_5720VENDOR_M_ST_M45PE40:
12925 case FLASH_5720VENDOR_A_ST_M25PE40:
12926 case FLASH_5720VENDOR_A_ST_M45PE40:
12927 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12928 break;
12929 case FLASH_5720VENDOR_M_ST_M25PE80:
12930 case FLASH_5720VENDOR_M_ST_M45PE80:
12931 case FLASH_5720VENDOR_A_ST_M25PE80:
12932 case FLASH_5720VENDOR_A_ST_M45PE80:
12933 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12934 break;
12935 default:
12936 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12937 break;
12938 }
12939 break;
12940 default:
63c3a66f 12941 tg3_flag_set(tp, NO_NVRAM);
9b91b5f1
MC
12942 return;
12943 }
12944
12945 tg3_nvram_get_pagesize(tp, nvcfg1);
12946 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
63c3a66f 12947 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
9b91b5f1
MC
12948}
12949
1da177e4
LT
12950/* Chips other than 5700/5701 use the NVRAM for fetching info. */
12951static void __devinit tg3_nvram_init(struct tg3 *tp)
12952{
1da177e4
LT
12953 tw32_f(GRC_EEPROM_ADDR,
12954 (EEPROM_ADDR_FSM_RESET |
12955 (EEPROM_DEFAULT_CLOCK_PERIOD <<
12956 EEPROM_ADDR_CLKPERD_SHIFT)));
12957
9d57f01c 12958 msleep(1);
1da177e4
LT
12959
12960 /* Enable seeprom accesses. */
12961 tw32_f(GRC_LOCAL_CTRL,
12962 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12963 udelay(100);
12964
12965 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12966 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
63c3a66f 12967 tg3_flag_set(tp, NVRAM);
1da177e4 12968
ec41c7df 12969 if (tg3_nvram_lock(tp)) {
5129c3a3
MC
12970 netdev_warn(tp->dev,
12971 "Cannot get nvram lock, %s failed\n",
05dbe005 12972 __func__);
ec41c7df
MC
12973 return;
12974 }
e6af301b 12975 tg3_enable_nvram_access(tp);
1da177e4 12976
989a9d23
MC
12977 tp->nvram_size = 0;
12978
361b4ac2
MC
12979 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12980 tg3_get_5752_nvram_info(tp);
d3c7b886
MC
12981 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12982 tg3_get_5755_nvram_info(tp);
d30cdd28 12983 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
57e6983c
MC
12984 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12985 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1b27777a 12986 tg3_get_5787_nvram_info(tp);
6b91fa02
MC
12987 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12988 tg3_get_5761_nvram_info(tp);
b5d3772c
MC
12989 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12990 tg3_get_5906_nvram_info(tp);
b703df6f 12991 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
55086ad9 12992 tg3_flag(tp, 57765_CLASS))
321d32a0 12993 tg3_get_57780_nvram_info(tp);
9b91b5f1
MC
12994 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12995 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
a1b950d5 12996 tg3_get_5717_nvram_info(tp);
9b91b5f1
MC
12997 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12998 tg3_get_5720_nvram_info(tp);
361b4ac2
MC
12999 else
13000 tg3_get_nvram_info(tp);
13001
989a9d23
MC
13002 if (tp->nvram_size == 0)
13003 tg3_get_nvram_size(tp);
1da177e4 13004
e6af301b 13005 tg3_disable_nvram_access(tp);
381291b7 13006 tg3_nvram_unlock(tp);
1da177e4
LT
13007
13008 } else {
63c3a66f
JP
13009 tg3_flag_clear(tp, NVRAM);
13010 tg3_flag_clear(tp, NVRAM_BUFFERED);
1da177e4
LT
13011
13012 tg3_get_eeprom_size(tp);
13013 }
13014}
13015
1da177e4
LT
13016struct subsys_tbl_ent {
13017 u16 subsys_vendor, subsys_devid;
13018 u32 phy_id;
13019};
13020
24daf2b0 13021static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
1da177e4 13022 /* Broadcom boards. */
24daf2b0 13023 { TG3PCI_SUBVENDOR_ID_BROADCOM,
79eb6904 13024 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
24daf2b0 13025 { TG3PCI_SUBVENDOR_ID_BROADCOM,
79eb6904 13026 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
24daf2b0 13027 { TG3PCI_SUBVENDOR_ID_BROADCOM,
79eb6904 13028 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
24daf2b0
MC
13029 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13030 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13031 { TG3PCI_SUBVENDOR_ID_BROADCOM,
79eb6904 13032 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
24daf2b0 13033 { TG3PCI_SUBVENDOR_ID_BROADCOM,
79eb6904 13034 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
24daf2b0
MC
13035 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13036 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13037 { TG3PCI_SUBVENDOR_ID_BROADCOM,
79eb6904 13038 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
24daf2b0 13039 { TG3PCI_SUBVENDOR_ID_BROADCOM,
79eb6904 13040 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
24daf2b0 13041 { TG3PCI_SUBVENDOR_ID_BROADCOM,
79eb6904 13042 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
24daf2b0 13043 { TG3PCI_SUBVENDOR_ID_BROADCOM,
79eb6904 13044 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
1da177e4
LT
13045
13046 /* 3com boards. */
24daf2b0 13047 { TG3PCI_SUBVENDOR_ID_3COM,
79eb6904 13048 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
24daf2b0 13049 { TG3PCI_SUBVENDOR_ID_3COM,
79eb6904 13050 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
24daf2b0
MC
13051 { TG3PCI_SUBVENDOR_ID_3COM,
13052 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13053 { TG3PCI_SUBVENDOR_ID_3COM,
79eb6904 13054 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
24daf2b0 13055 { TG3PCI_SUBVENDOR_ID_3COM,
79eb6904 13056 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
1da177e4
LT
13057
13058 /* DELL boards. */
24daf2b0 13059 { TG3PCI_SUBVENDOR_ID_DELL,
79eb6904 13060 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
24daf2b0 13061 { TG3PCI_SUBVENDOR_ID_DELL,
79eb6904 13062 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
24daf2b0 13063 { TG3PCI_SUBVENDOR_ID_DELL,
79eb6904 13064 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
24daf2b0 13065 { TG3PCI_SUBVENDOR_ID_DELL,
79eb6904 13066 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
1da177e4
LT
13067
13068 /* Compaq boards. */
24daf2b0 13069 { TG3PCI_SUBVENDOR_ID_COMPAQ,
79eb6904 13070 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
24daf2b0 13071 { TG3PCI_SUBVENDOR_ID_COMPAQ,
79eb6904 13072 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
24daf2b0
MC
13073 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13074 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13075 { TG3PCI_SUBVENDOR_ID_COMPAQ,
79eb6904 13076 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
24daf2b0 13077 { TG3PCI_SUBVENDOR_ID_COMPAQ,
79eb6904 13078 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
1da177e4
LT
13079
13080 /* IBM boards. */
24daf2b0
MC
13081 { TG3PCI_SUBVENDOR_ID_IBM,
13082 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
1da177e4
LT
13083};
13084
24daf2b0 13085static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
1da177e4
LT
13086{
13087 int i;
13088
13089 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13090 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13091 tp->pdev->subsystem_vendor) &&
13092 (subsys_id_to_phy_id[i].subsys_devid ==
13093 tp->pdev->subsystem_device))
13094 return &subsys_id_to_phy_id[i];
13095 }
13096 return NULL;
13097}
13098
7d0c41ef 13099static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
1da177e4 13100{
1da177e4 13101 u32 val;
f49639e6 13102
79eb6904 13103 tp->phy_id = TG3_PHY_ID_INVALID;
7d0c41ef
MC
13104 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13105
a85feb8c 13106 /* Assume an onboard device and WOL capable by default. */
63c3a66f
JP
13107 tg3_flag_set(tp, EEPROM_WRITE_PROT);
13108 tg3_flag_set(tp, WOL_CAP);
72b845e0 13109
b5d3772c 13110 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9d26e213 13111 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
63c3a66f
JP
13112 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13113 tg3_flag_set(tp, IS_NIC);
9d26e213 13114 }
0527ba35
MC
13115 val = tr32(VCPU_CFGSHDW);
13116 if (val & VCPU_CFGSHDW_ASPM_DBNC)
63c3a66f 13117 tg3_flag_set(tp, ASPM_WORKAROUND);
0527ba35 13118 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
6fdbab9d 13119 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
63c3a66f 13120 tg3_flag_set(tp, WOL_ENABLE);
6fdbab9d
RW
13121 device_set_wakeup_enable(&tp->pdev->dev, true);
13122 }
05ac4cb7 13123 goto done;
b5d3772c
MC
13124 }
13125
1da177e4
LT
13126 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13127 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13128 u32 nic_cfg, led_cfg;
a9daf367 13129 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
7d0c41ef 13130 int eeprom_phy_serdes = 0;
1da177e4
LT
13131
13132 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13133 tp->nic_sram_data_cfg = nic_cfg;
13134
13135 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13136 ver >>= NIC_SRAM_DATA_VER_SHIFT;
6ff6f81d
MC
13137 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13138 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13139 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
1da177e4
LT
13140 (ver > 0) && (ver < 0x100))
13141 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13142
a9daf367
MC
13143 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13144 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13145
1da177e4
LT
13146 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13147 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13148 eeprom_phy_serdes = 1;
13149
13150 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13151 if (nic_phy_id != 0) {
13152 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13153 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13154
13155 eeprom_phy_id = (id1 >> 16) << 10;
13156 eeprom_phy_id |= (id2 & 0xfc00) << 16;
13157 eeprom_phy_id |= (id2 & 0x03ff) << 0;
13158 } else
13159 eeprom_phy_id = 0;
13160
7d0c41ef 13161 tp->phy_id = eeprom_phy_id;
747e8f8b 13162 if (eeprom_phy_serdes) {
63c3a66f 13163 if (!tg3_flag(tp, 5705_PLUS))
f07e9af3 13164 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
a50d0796 13165 else
f07e9af3 13166 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
747e8f8b 13167 }
7d0c41ef 13168
63c3a66f 13169 if (tg3_flag(tp, 5750_PLUS))
1da177e4
LT
13170 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13171 SHASTA_EXT_LED_MODE_MASK);
cbf46853 13172 else
1da177e4
LT
13173 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13174
13175 switch (led_cfg) {
13176 default:
13177 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13178 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13179 break;
13180
13181 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13182 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13183 break;
13184
13185 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13186 tp->led_ctrl = LED_CTRL_MODE_MAC;
9ba27794
MC
13187
13188 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
13189 * read on some older 5700/5701 bootcode.
13190 */
13191 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13192 ASIC_REV_5700 ||
13193 GET_ASIC_REV(tp->pci_chip_rev_id) ==
13194 ASIC_REV_5701)
13195 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13196
1da177e4
LT
13197 break;
13198
13199 case SHASTA_EXT_LED_SHARED:
13200 tp->led_ctrl = LED_CTRL_MODE_SHARED;
13201 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13202 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13203 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13204 LED_CTRL_MODE_PHY_2);
13205 break;
13206
13207 case SHASTA_EXT_LED_MAC:
13208 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13209 break;
13210
13211 case SHASTA_EXT_LED_COMBO:
13212 tp->led_ctrl = LED_CTRL_MODE_COMBO;
13213 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13214 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13215 LED_CTRL_MODE_PHY_2);
13216 break;
13217
855e1111 13218 }
1da177e4
LT
13219
13220 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13221 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13222 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13223 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13224
b2a5c19c
MC
13225 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13226 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
5f60891b 13227
9d26e213 13228 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
63c3a66f 13229 tg3_flag_set(tp, EEPROM_WRITE_PROT);
9d26e213
MC
13230 if ((tp->pdev->subsystem_vendor ==
13231 PCI_VENDOR_ID_ARIMA) &&
13232 (tp->pdev->subsystem_device == 0x205a ||
13233 tp->pdev->subsystem_device == 0x2063))
63c3a66f 13234 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
9d26e213 13235 } else {
63c3a66f
JP
13236 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13237 tg3_flag_set(tp, IS_NIC);
9d26e213 13238 }
1da177e4
LT
13239
13240 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
63c3a66f
JP
13241 tg3_flag_set(tp, ENABLE_ASF);
13242 if (tg3_flag(tp, 5750_PLUS))
13243 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
1da177e4 13244 }
b2b98d4a
MC
13245
13246 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
63c3a66f
JP
13247 tg3_flag(tp, 5750_PLUS))
13248 tg3_flag_set(tp, ENABLE_APE);
b2b98d4a 13249
f07e9af3 13250 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
a85feb8c 13251 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
63c3a66f 13252 tg3_flag_clear(tp, WOL_CAP);
1da177e4 13253
63c3a66f 13254 if (tg3_flag(tp, WOL_CAP) &&
6fdbab9d 13255 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
63c3a66f 13256 tg3_flag_set(tp, WOL_ENABLE);
6fdbab9d
RW
13257 device_set_wakeup_enable(&tp->pdev->dev, true);
13258 }
0527ba35 13259
1da177e4 13260 if (cfg2 & (1 << 17))
f07e9af3 13261 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
1da177e4
LT
13262
13263 /* serdes signal pre-emphasis in register 0x590 set by */
13264 /* bootcode if bit 18 is set */
13265 if (cfg2 & (1 << 18))
f07e9af3 13266 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
8ed5d97e 13267
63c3a66f
JP
13268 if ((tg3_flag(tp, 57765_PLUS) ||
13269 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13270 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
6833c043 13271 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
f07e9af3 13272 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
6833c043 13273
63c3a66f 13274 if (tg3_flag(tp, PCI_EXPRESS) &&
8c69b1e7 13275 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
63c3a66f 13276 !tg3_flag(tp, 57765_PLUS)) {
8ed5d97e
MC
13277 u32 cfg3;
13278
13279 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13280 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
63c3a66f 13281 tg3_flag_set(tp, ASPM_WORKAROUND);
8ed5d97e 13282 }
a9daf367 13283
14417063 13284 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
63c3a66f 13285 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
a9daf367 13286 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
63c3a66f 13287 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
a9daf367 13288 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
63c3a66f 13289 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
1da177e4 13290 }
05ac4cb7 13291done:
63c3a66f 13292 if (tg3_flag(tp, WOL_CAP))
43067ed8 13293 device_set_wakeup_enable(&tp->pdev->dev,
63c3a66f 13294 tg3_flag(tp, WOL_ENABLE));
43067ed8
RW
13295 else
13296 device_set_wakeup_capable(&tp->pdev->dev, false);
7d0c41ef
MC
13297}
13298
b2a5c19c
MC
13299static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13300{
13301 int i;
13302 u32 val;
13303
13304 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13305 tw32(OTP_CTRL, cmd);
13306
13307 /* Wait for up to 1 ms for command to execute. */
13308 for (i = 0; i < 100; i++) {
13309 val = tr32(OTP_STATUS);
13310 if (val & OTP_STATUS_CMD_DONE)
13311 break;
13312 udelay(10);
13313 }
13314
13315 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13316}
13317
13318/* Read the gphy configuration from the OTP region of the chip. The gphy
13319 * configuration is a 32-bit value that straddles the alignment boundary.
13320 * We do two 32-bit reads and then shift and merge the results.
13321 */
13322static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13323{
13324 u32 bhalf_otp, thalf_otp;
13325
13326 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13327
13328 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13329 return 0;
13330
13331 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13332
13333 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13334 return 0;
13335
13336 thalf_otp = tr32(OTP_READ_DATA);
13337
13338 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13339
13340 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13341 return 0;
13342
13343 bhalf_otp = tr32(OTP_READ_DATA);
13344
13345 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13346}
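
The return expression above is the merge step for a 32-bit value whose halves land in two different OTP words. A worked standalone example with made-up register contents: the wanted configuration 0xCAFEBABE has its top half in the low 16 bits of the first read and its bottom half in the high 16 bits of the second.

	#include <stdio.h>

	int main(void)
	{
		unsigned int thalf = 0x1111CAFEu;	/* don't-care | top half */
		unsigned int bhalf = 0xBABE2222u;	/* bottom half | don't-care */

		/* Same shift-and-merge as tg3_read_otp_phycfg(). */
		unsigned int cfg = ((thalf & 0x0000ffff) << 16) | (bhalf >> 16);

		printf("merged gphy cfg: 0x%08X\n", cfg);	/* 0xCAFEBABE */
		return 0;
	}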
13347
e256f8a3
MC
13348static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13349{
202ff1c2 13350 u32 adv = ADVERTISED_Autoneg;
e256f8a3
MC
13351
13352 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13353 adv |= ADVERTISED_1000baseT_Half |
13354 ADVERTISED_1000baseT_Full;
13355
13356 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13357 adv |= ADVERTISED_100baseT_Half |
13358 ADVERTISED_100baseT_Full |
13359 ADVERTISED_10baseT_Half |
13360 ADVERTISED_10baseT_Full |
13361 ADVERTISED_TP;
13362 else
13363 adv |= ADVERTISED_FIBRE;
13364
13365 tp->link_config.advertising = adv;
13366 tp->link_config.speed = SPEED_INVALID;
13367 tp->link_config.duplex = DUPLEX_INVALID;
13368 tp->link_config.autoneg = AUTONEG_ENABLE;
13369 tp->link_config.active_speed = SPEED_INVALID;
13370 tp->link_config.active_duplex = DUPLEX_INVALID;
13371 tp->link_config.orig_speed = SPEED_INVALID;
13372 tp->link_config.orig_duplex = DUPLEX_INVALID;
13373 tp->link_config.orig_autoneg = AUTONEG_INVALID;
13374}
13375
7d0c41ef
MC
13376static int __devinit tg3_phy_probe(struct tg3 *tp)
13377{
13378 u32 hw_phy_id_1, hw_phy_id_2;
13379 u32 hw_phy_id, hw_phy_id_masked;
13380 int err;
1da177e4 13381
e256f8a3 13382 /* flow control autonegotiation is default behavior */
63c3a66f 13383 tg3_flag_set(tp, PAUSE_AUTONEG);
e256f8a3
MC
13384 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13385
63c3a66f 13386 if (tg3_flag(tp, USE_PHYLIB))
b02fd9e3
MC
13387 return tg3_phy_init(tp);
13388
1da177e4 13389 /* Reading the PHY ID register can conflict with ASF
877d0310 13390 * firmware access to the PHY hardware.
1da177e4
LT
13391 */
13392 err = 0;
63c3a66f 13393 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
79eb6904 13394 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
1da177e4
LT
13395 } else {
13396 /* Now read the physical PHY_ID from the chip and verify
13397 * that it is sane. If it doesn't look good, we fall back
13398 * to the hard-coded table-based PHY_ID and, failing
13399 * that, to the value found in the eeprom area.
13400 */
13401 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13402 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13403
13404 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
13405 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13406 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
13407
79eb6904 13408 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
1da177e4
LT
13409 }
13410
79eb6904 13411 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
1da177e4 13412 tp->phy_id = hw_phy_id;
79eb6904 13413 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
f07e9af3 13414 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
da6b2d01 13415 else
f07e9af3 13416 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
1da177e4 13417 } else {
79eb6904 13418 if (tp->phy_id != TG3_PHY_ID_INVALID) {
7d0c41ef
MC
13419 /* Do nothing, phy ID already set up in
13420 * tg3_get_eeprom_hw_cfg().
13421 */
1da177e4
LT
13422 } else {
13423 struct subsys_tbl_ent *p;
13424
13425 /* No eeprom signature? Try the hardcoded
13426 * subsys device table.
13427 */
24daf2b0 13428 p = tg3_lookup_by_subsys(tp);
1da177e4
LT
13429 if (!p)
13430 return -ENODEV;
13431
13432 tp->phy_id = p->phy_id;
13433 if (!tp->phy_id ||
79eb6904 13434 tp->phy_id == TG3_PHY_ID_BCM8002)
f07e9af3 13435 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
1da177e4
LT
13436 }
13437 }
13438
a6b68dab 13439 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
5baa5e9a
MC
13440 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13441 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13442 (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
a6b68dab
MC
13443 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13444 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13445 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
52b02d04
MC
13446 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13447
e256f8a3
MC
13448 tg3_phy_init_link_config(tp);
13449
f07e9af3 13450 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
63c3a66f
JP
13451 !tg3_flag(tp, ENABLE_APE) &&
13452 !tg3_flag(tp, ENABLE_ASF)) {
e2bf73e7 13453 u32 bmsr, dummy;
1da177e4
LT
13454
13455 tg3_readphy(tp, MII_BMSR, &bmsr);
13456 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13457 (bmsr & BMSR_LSTATUS))
13458 goto skip_phy_reset;
6aa20a22 13459
1da177e4
LT
13460 err = tg3_phy_reset(tp);
13461 if (err)
13462 return err;
13463
42b64a45 13464 tg3_phy_set_wirespeed(tp);
1da177e4 13465
e2bf73e7 13466 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
42b64a45
MC
13467 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13468 tp->link_config.flowctrl);
1da177e4
LT
13469
13470 tg3_writephy(tp, MII_BMCR,
13471 BMCR_ANENABLE | BMCR_ANRESTART);
13472 }
1da177e4
LT
13473 }
13474
13475skip_phy_reset:
79eb6904 13476 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1da177e4
LT
13477 err = tg3_init_5401phy_dsp(tp);
13478 if (err)
13479 return err;
1da177e4 13480
1da177e4
LT
13481 err = tg3_init_5401phy_dsp(tp);
13482 }
13483
1da177e4
LT
13484 return err;
13485}
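
The hw_phy_id assembly in tg3_phy_probe() packs the two standard MII ID registers (PHYSID1 carries the upper OUI bits; PHYSID2 carries the low OUI bits in 15:10 and model/revision in 9:0) into tg3's internal PHY ID format. Here is a standalone sketch of that packing with made-up register values; the real TG3_PHY_ID_* constants and TG3_PHY_ID_MASK live in tg3.h.

	#include <stdio.h>

	int main(void)
	{
		unsigned int physid1 = 0x0020;	/* hypothetical MII_PHYSID1 */
		unsigned int physid2 = 0x60c0;	/* hypothetical MII_PHYSID2 */

		/* Same packing as the probe code: PHYSID1 occupies bits
		 * 25:10, the top six bits of PHYSID2 land in bits 31:26,
		 * and the model/revision bits stay in bits 9:0.
		 */
		unsigned int id = (physid1 & 0xffff) << 10;
		id |= (physid2 & 0xfc00) << 16;
		id |= (physid2 & 0x03ff) << 0;

		/* Masking off the low-order revision bits gives the value
		 * compared against the known-PHY table.
		 */
		printf("packed 0x%08x, masked 0x%08x\n", id, id & ~0xfu);
		return 0;
	}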
13486
184b8904 13487static void __devinit tg3_read_vpd(struct tg3 *tp)
1da177e4 13488{
a4a8bb15 13489 u8 *vpd_data;
4181b2c8 13490 unsigned int block_end, rosize, len;
535a490e 13491 u32 vpdlen;
184b8904 13492 int j, i = 0;
a4a8bb15 13493
535a490e 13494 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
a4a8bb15
MC
13495 if (!vpd_data)
13496 goto out_no_vpd;
1da177e4 13497
535a490e 13498 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
4181b2c8
MC
13499 if (i < 0)
13500 goto out_not_found;
1da177e4 13501
4181b2c8
MC
13502 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13503 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13504 i += PCI_VPD_LRDT_TAG_SIZE;
1da177e4 13505
535a490e 13506 if (block_end > vpdlen)
4181b2c8 13507 goto out_not_found;
af2c6a4a 13508
184b8904
MC
13509 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13510 PCI_VPD_RO_KEYWORD_MFR_ID);
13511 if (j > 0) {
13512 len = pci_vpd_info_field_size(&vpd_data[j]);
13513
13514 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13515 if (j + len > block_end || len != 4 ||
13516 memcmp(&vpd_data[j], "1028", 4))
13517 goto partno;
13518
13519 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13520 PCI_VPD_RO_KEYWORD_VENDOR0);
13521 if (j < 0)
13522 goto partno;
13523
13524 len = pci_vpd_info_field_size(&vpd_data[j]);
13525
13526 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13527 if (j + len > block_end)
13528 goto partno;
13529
13530 memcpy(tp->fw_ver, &vpd_data[j], len);
535a490e 13531 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
184b8904
MC
13532 }
13533
13534partno:
4181b2c8
MC
13535 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13536 PCI_VPD_RO_KEYWORD_PARTNO);
13537 if (i < 0)
13538 goto out_not_found;
af2c6a4a 13539
4181b2c8 13540 len = pci_vpd_info_field_size(&vpd_data[i]);
1da177e4 13541
4181b2c8
MC
13542 i += PCI_VPD_INFO_FLD_HDR_SIZE;
13543 if (len > TG3_BPN_SIZE ||
535a490e 13544 (len + i) > vpdlen)
4181b2c8 13545 goto out_not_found;
1da177e4 13546
4181b2c8 13547 memcpy(tp->board_part_number, &vpd_data[i], len);
1da177e4 13548
1da177e4 13549out_not_found:
a4a8bb15 13550 kfree(vpd_data);
37a949c5 13551 if (tp->board_part_number[0])
a4a8bb15
MC
13552 return;
13553
13554out_no_vpd:
37a949c5
MC
13555 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13556 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13557 strcpy(tp->board_part_number, "BCM5717");
13558 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13559 strcpy(tp->board_part_number, "BCM5718");
13560 else
13561 goto nomatch;
13562 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13563 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13564 strcpy(tp->board_part_number, "BCM57780");
13565 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13566 strcpy(tp->board_part_number, "BCM57760");
13567 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13568 strcpy(tp->board_part_number, "BCM57790");
13569 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13570 strcpy(tp->board_part_number, "BCM57788");
13571 else
13572 goto nomatch;
13573 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13574 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13575 strcpy(tp->board_part_number, "BCM57761");
13576 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13577 strcpy(tp->board_part_number, "BCM57765");
13578 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13579 strcpy(tp->board_part_number, "BCM57781");
13580 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13581 strcpy(tp->board_part_number, "BCM57785");
13582 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13583 strcpy(tp->board_part_number, "BCM57791");
13584 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13585 strcpy(tp->board_part_number, "BCM57795");
13586 else
13587 goto nomatch;
55086ad9
MC
13588 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
13589 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
13590 strcpy(tp->board_part_number, "BCM57762");
13591 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
13592 strcpy(tp->board_part_number, "BCM57766");
13593 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
13594 strcpy(tp->board_part_number, "BCM57782");
13595 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13596 strcpy(tp->board_part_number, "BCM57786");
13597 else
13598 goto nomatch;
37a949c5 13599 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
b5d3772c 13600 strcpy(tp->board_part_number, "BCM95906");
37a949c5
MC
13601 } else {
13602nomatch:
b5d3772c 13603 strcpy(tp->board_part_number, "none");
37a949c5 13604 }
1da177e4
LT
13605}
13606
9c8a620e
MC
13607static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13608{
13609 u32 val;
13610
e4f34110 13611 if (tg3_nvram_read(tp, offset, &val) ||
9c8a620e 13612 (val & 0xfc000000) != 0x0c000000 ||
e4f34110 13613 tg3_nvram_read(tp, offset + 4, &val) ||
9c8a620e
MC
13614 val != 0)
13615 return 0;
13616
13617 return 1;
13618}
13619
acd9c119
MC
13620static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13621{
ff3a7cb2 13622 u32 val, offset, start, ver_offset;
75f9936e 13623 int i, dst_off;
ff3a7cb2 13624 bool newver = false;
acd9c119
MC
13625
13626 if (tg3_nvram_read(tp, 0xc, &offset) ||
13627 tg3_nvram_read(tp, 0x4, &start))
13628 return;
13629
13630 offset = tg3_nvram_logical_addr(tp, offset);
13631
ff3a7cb2 13632 if (tg3_nvram_read(tp, offset, &val))
acd9c119
MC
13633 return;
13634
ff3a7cb2
MC
13635 if ((val & 0xfc000000) == 0x0c000000) {
13636 if (tg3_nvram_read(tp, offset + 4, &val))
acd9c119
MC
13637 return;
13638
ff3a7cb2
MC
13639 if (val == 0)
13640 newver = true;
13641 }
13642
75f9936e
MC
13643 dst_off = strlen(tp->fw_ver);
13644
ff3a7cb2 13645 if (newver) {
75f9936e
MC
13646 if (TG3_VER_SIZE - dst_off < 16 ||
13647 tg3_nvram_read(tp, offset + 8, &ver_offset))
ff3a7cb2
MC
13648 return;
13649
13650 offset = offset + ver_offset - start;
13651 for (i = 0; i < 16; i += 4) {
13652 __be32 v;
13653 if (tg3_nvram_read_be32(tp, offset + i, &v))
13654 return;
13655
75f9936e 13656 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
ff3a7cb2
MC
13657 }
13658 } else {
13659 u32 major, minor;
13660
13661 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13662 return;
13663
13664 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13665 TG3_NVM_BCVER_MAJSFT;
13666 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
75f9936e
MC
13667 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13668 "v%d.%02d", major, minor);
acd9c119
MC
13669 }
13670}
13671
a6f6cb1c
MC
13672static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13673{
13674 u32 val, major, minor;
13675
13676 /* Use native endian representation */
13677 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13678 return;
13679
13680 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13681 TG3_NVM_HWSB_CFG1_MAJSFT;
13682 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13683 TG3_NVM_HWSB_CFG1_MINSFT;
13684
13685 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13686}
13687
dfe00d7d
MC
13688static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13689{
13690 u32 offset, major, minor, build;
13691
75f9936e 13692 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
dfe00d7d
MC
13693
13694 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13695 return;
13696
13697 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13698 case TG3_EEPROM_SB_REVISION_0:
13699 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13700 break;
13701 case TG3_EEPROM_SB_REVISION_2:
13702 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13703 break;
13704 case TG3_EEPROM_SB_REVISION_3:
13705 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13706 break;
a4153d40
MC
13707 case TG3_EEPROM_SB_REVISION_4:
13708 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13709 break;
13710 case TG3_EEPROM_SB_REVISION_5:
13711 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13712 break;
bba226ac
MC
13713 case TG3_EEPROM_SB_REVISION_6:
13714 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13715 break;
dfe00d7d
MC
13716 default:
13717 return;
13718 }
13719
e4f34110 13720 if (tg3_nvram_read(tp, offset, &val))
dfe00d7d
MC
13721 return;
13722
13723 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13724 TG3_EEPROM_SB_EDH_BLD_SHFT;
13725 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13726 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13727 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
13728
13729 if (minor > 99 || build > 26)
13730 return;
13731
75f9936e
MC
13732 offset = strlen(tp->fw_ver);
13733 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13734 " v%d.%02d", major, minor);
dfe00d7d
MC
13735
13736 if (build > 0) {
75f9936e
MC
13737 offset = strlen(tp->fw_ver);
13738 if (offset < TG3_VER_SIZE - 1)
13739 tp->fw_ver[offset] = 'a' + build - 1;
dfe00d7d
MC
13740 }
13741}
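
The version-string assembly above (major/minor via snprintf, then a build-suffix letter) can be exercised in isolation. A hedged sketch with made-up decoded fields; the real bit masks and TG3_VER_SIZE are defined in tg3.h.

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		int major = 1, minor = 4, build = 1;	/* hypothetical decoded fields */
		char ver[32] = "sb";			/* rest of the array is zeroed */
		size_t off = strlen(ver);

		snprintf(ver + off, sizeof(ver) - off, " v%d.%02d", major, minor);
		if (build > 0) {
			off = strlen(ver);
			if (off < sizeof(ver) - 1)
				ver[off] = 'a' + build - 1;	/* build 1 => 'a' */
		}
		printf("%s\n", ver);	/* "sb v1.04a" */
		return 0;
	}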
13742
acd9c119 13743static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
c4e6575c
MC
13744{
13745 u32 val, offset, start;
acd9c119 13746 int i, vlen;
9c8a620e
MC
13747
13748 for (offset = TG3_NVM_DIR_START;
13749 offset < TG3_NVM_DIR_END;
13750 offset += TG3_NVM_DIRENT_SIZE) {
e4f34110 13751 if (tg3_nvram_read(tp, offset, &val))
c4e6575c
MC
13752 return;
13753
9c8a620e
MC
13754 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13755 break;
13756 }
13757
13758 if (offset == TG3_NVM_DIR_END)
13759 return;
13760
63c3a66f 13761 if (!tg3_flag(tp, 5705_PLUS))
9c8a620e 13762 start = 0x08000000;
e4f34110 13763 else if (tg3_nvram_read(tp, offset - 4, &start))
9c8a620e
MC
13764 return;
13765
e4f34110 13766 if (tg3_nvram_read(tp, offset + 4, &offset) ||
9c8a620e 13767 !tg3_fw_img_is_valid(tp, offset) ||
e4f34110 13768 tg3_nvram_read(tp, offset + 8, &val))
9c8a620e
MC
13769 return;
13770
13771 offset += val - start;
13772
acd9c119 13773 vlen = strlen(tp->fw_ver);
9c8a620e 13774
acd9c119
MC
13775 tp->fw_ver[vlen++] = ',';
13776 tp->fw_ver[vlen++] = ' ';
9c8a620e
MC
13777
13778 for (i = 0; i < 4; i++) {
a9dc529d
MC
13779 __be32 v;
13780 if (tg3_nvram_read_be32(tp, offset, &v))
c4e6575c
MC
13781 return;
13782
b9fc7dc5 13783 offset += sizeof(v);
c4e6575c 13784
acd9c119
MC
13785 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13786 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
9c8a620e 13787 break;
c4e6575c 13788 }
9c8a620e 13789
acd9c119
MC
13790 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13791 vlen += sizeof(v);
c4e6575c 13792 }
acd9c119
MC
13793}
13794
7fd76445
MC
13795static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13796{
13797 int vlen;
13798 u32 apedata;
ecc79648 13799 char *fwtype;
7fd76445 13800
63c3a66f 13801 if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
7fd76445
MC
13802 return;
13803
13804 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13805 if (apedata != APE_SEG_SIG_MAGIC)
13806 return;
13807
13808 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13809 if (!(apedata & APE_FW_STATUS_READY))
13810 return;
13811
13812 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13813
dc6d0744 13814 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
63c3a66f 13815 tg3_flag_set(tp, APE_HAS_NCSI);
ecc79648 13816 fwtype = "NCSI";
dc6d0744 13817 } else {
ecc79648 13818 fwtype = "DASH";
dc6d0744 13819 }
ecc79648 13820
7fd76445
MC
13821 vlen = strlen(tp->fw_ver);
13822
ecc79648
MC
13823 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13824 fwtype,
7fd76445
MC
13825 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13826 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13827 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13828 (apedata & APE_FW_VERSION_BLDMSK));
13829}
13830
acd9c119
MC
13831static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13832{
13833 u32 val;
75f9936e 13834 bool vpd_vers = false;
acd9c119 13835
75f9936e
MC
13836 if (tp->fw_ver[0] != 0)
13837 vpd_vers = true;
df259d8c 13838
63c3a66f 13839 if (tg3_flag(tp, NO_NVRAM)) {
75f9936e 13840 strcat(tp->fw_ver, "sb");
df259d8c
MC
13841 return;
13842 }
13843
acd9c119
MC
13844 if (tg3_nvram_read(tp, 0, &val))
13845 return;
13846
13847 if (val == TG3_EEPROM_MAGIC)
13848 tg3_read_bc_ver(tp);
13849 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13850 tg3_read_sb_ver(tp, val);
a6f6cb1c
MC
13851 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13852 tg3_read_hwsb_ver(tp);
acd9c119
MC
13853 else
13854 return;
13855
c9cab24e 13856 if (vpd_vers)
75f9936e 13857 goto done;
acd9c119 13858
c9cab24e
MC
13859 if (tg3_flag(tp, ENABLE_APE)) {
13860 if (tg3_flag(tp, ENABLE_ASF))
13861 tg3_read_dash_ver(tp);
13862 } else if (tg3_flag(tp, ENABLE_ASF)) {
13863 tg3_read_mgmtfw_ver(tp);
13864 }
9c8a620e 13865
75f9936e 13866done:
9c8a620e 13867 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
c4e6575c
MC
13868}
13869
7cb32cf2
MC
13870static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13871{
63c3a66f 13872 if (tg3_flag(tp, LRG_PROD_RING_CAP))
de9f5230 13873 return TG3_RX_RET_MAX_SIZE_5717;
63c3a66f 13874 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
de9f5230 13875 return TG3_RX_RET_MAX_SIZE_5700;
7cb32cf2 13876 else
de9f5230 13877 return TG3_RX_RET_MAX_SIZE_5705;
7cb32cf2
MC
13878}
13879
4143470c 13880static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
895950c2
JP
13881 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13882 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13883 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13884 { },
13885};
13886
16c7fa7d
MC
13887static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
13888{
13889 struct pci_dev *peer;
13890 unsigned int func, devnr = tp->pdev->devfn & ~7;
13891
13892 for (func = 0; func < 8; func++) {
13893 peer = pci_get_slot(tp->pdev->bus, devnr | func);
13894 if (peer && peer != tp->pdev)
13895 break;
13896 pci_dev_put(peer);
13897 }
13898 /* 5704 can be configured in single-port mode, set peer to
13899 * tp->pdev in that case.
13900 */
13901 if (!peer) {
13902 peer = tp->pdev;
13903 return peer;
13904 }
13905
13906 /*
13907 * We don't need to keep the refcount elevated; there's no way
13908 * to remove one half of this device without removing the other.
13909 */
13910 pci_dev_put(peer);
13911
13912 return peer;
13913}
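
tg3_find_peer() leans on the PCI devfn encoding: the low three bits select the function and the upper bits the device (slot), so devfn & ~7 is function 0 of the same slot and OR-ing in func walks all eight siblings. A standalone sketch of that arithmetic follows; PCI_SLOT()/PCI_FUNC() mirror the kernel's helpers and the sample devfn is invented.

	#include <stdio.h>

	#define PCI_SLOT(devfn)	(((devfn) >> 3) & 0x1f)
	#define PCI_FUNC(devfn)	((devfn) & 0x07)

	int main(void)
	{
		unsigned int devfn = (4 << 3) | 1;	/* slot 4, function 1 */
		unsigned int base = devfn & ~7u;	/* slot 4, function 0 */
		unsigned int func;

		/* Enumerate the up-to-eight functions sharing the slot, as
		 * the peer search does when pairing the two 5704 ports.
		 */
		for (func = 0; func < 8; func++)
			printf("candidate devfn %#x = slot %u func %u\n",
			       base | func, PCI_SLOT(base | func),
			       PCI_FUNC(base | func));
		return 0;
	}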
13914
1da177e4
LT
13915static int __devinit tg3_get_invariants(struct tg3 *tp)
13916{
1da177e4 13917 u32 misc_ctrl_reg;
1da177e4
LT
13918 u32 pci_state_reg, grc_misc_cfg;
13919 u32 val;
13920 u16 pci_cmd;
5e7dfd0f 13921 int err;
1da177e4 13922
1da177e4
LT
13923 /* Force memory write invalidate off. If we leave it on,
13924 * then on 5700_BX chips we have to enable a workaround.
13925 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13926 * to match the cacheline size. The Broadcom driver has this
13927 * workaround but turns MWI off all the time and so never uses
13928 * it. This seems to suggest that the workaround is insufficient.
13929 */
13930 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13931 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13932 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13933
16821285
MC
13934 /* Important! -- Make sure register accesses are byteswapped
13935 * correctly. Also, for those chips that require it, make
13936 * sure that indirect register accesses are enabled before
13937 * the first operation.
1da177e4
LT
13938 */
13939 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13940 &misc_ctrl_reg);
16821285
MC
13941 tp->misc_host_ctrl |= (misc_ctrl_reg &
13942 MISC_HOST_CTRL_CHIPREV);
13943 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13944 tp->misc_host_ctrl);
1da177e4
LT
13945
13946 tp->pci_chip_rev_id = (misc_ctrl_reg >>
13947 MISC_HOST_CTRL_CHIPREV_SHIFT);
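/* On newer chips the revision field in MISC_HOST_CTRL only says
 * "look elsewhere"; the real ASIC revision is read from a product
 * ID register whose location depends on the device ID.
 */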
795d01c5
MC
13948 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13949 u32 prod_id_asic_rev;
13950
5001e2f6
MC
13951 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13952 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
d78b59f5
MC
13953 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13954 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
f6eb9b1f
MC
13955 pci_read_config_dword(tp->pdev,
13956 TG3PCI_GEN2_PRODID_ASICREV,
13957 &prod_id_asic_rev);
b703df6f
MC
13958 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13959 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13960 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13961 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13962 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
55086ad9
MC
13963 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
13964 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
13965 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
13966 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
13967 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
b703df6f
MC
13968 pci_read_config_dword(tp->pdev,
13969 TG3PCI_GEN15_PRODID_ASICREV,
13970 &prod_id_asic_rev);
f6eb9b1f
MC
13971 else
13972 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13973 &prod_id_asic_rev);
13974
321d32a0 13975 tp->pci_chip_rev_id = prod_id_asic_rev;
795d01c5 13976 }
1da177e4 13977
ff645bec
MC
13978 /* Wrong chip ID in 5752 A0. This code can be removed later
13979 * as A0 is not in production.
13980 */
13981 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13982 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13983
6892914f
MC
13984 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13985 * we need to disable memory and use configuration cycles
13986 * only to access all registers. The 5702/03 chips
13987 * can mistakenly decode the special cycles from the
13988 * ICH chipsets as memory write cycles, causing corruption
13989 * of register and memory space. Only certain ICH bridges
13990 * will drive special cycles with non-zero data during the
13991 * address phase which can fall within the 5703's address
13992 * range. This is not an ICH bug as the PCI spec allows
13993 * non-zero address during special cycles. However, only
13994 * these ICH bridges are known to drive non-zero addresses
13995 * during special cycles.
13996 *
13997 * Since special cycles do not cross PCI bridges, we only
13998 * enable this workaround if the 5703 is on the secondary
13999 * bus of these ICH bridges.
14000 */
14001 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
14002 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
14003 static struct tg3_dev_id {
14004 u32 vendor;
14005 u32 device;
14006 u32 rev;
14007 } ich_chipsets[] = {
14008 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
14009 PCI_ANY_ID },
14010 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
14011 PCI_ANY_ID },
14012 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
14013 0xa },
14014 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
14015 PCI_ANY_ID },
14016 { },
14017 };
14018 struct tg3_dev_id *pci_id = &ich_chipsets[0];
14019 struct pci_dev *bridge = NULL;
14020
14021 while (pci_id->vendor != 0) {
14022 bridge = pci_get_device(pci_id->vendor, pci_id->device,
14023 bridge);
14024 if (!bridge) {
14025 pci_id++;
14026 continue;
14027 }
14028 if (pci_id->rev != PCI_ANY_ID) {
44c10138 14029 if (bridge->revision > pci_id->rev)
6892914f
MC
14030 continue;
14031 }
14032 if (bridge->subordinate &&
14033 (bridge->subordinate->number ==
14034 tp->pdev->bus->number)) {
63c3a66f 14035 tg3_flag_set(tp, ICH_WORKAROUND);
6892914f
MC
14036 pci_dev_put(bridge);
14037 break;
14038 }
14039 }
14040 }
14041
6ff6f81d 14042 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
41588ba1
MC
14043 static struct tg3_dev_id {
14044 u32 vendor;
14045 u32 device;
14046 } bridge_chipsets[] = {
14047 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14048 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14049 { },
14050 };
14051 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14052 struct pci_dev *bridge = NULL;
14053
14054 while (pci_id->vendor != 0) {
14055 bridge = pci_get_device(pci_id->vendor,
14056 pci_id->device,
14057 bridge);
14058 if (!bridge) {
14059 pci_id++;
14060 continue;
14061 }
14062 if (bridge->subordinate &&
14063 (bridge->subordinate->number <=
14064 tp->pdev->bus->number) &&
14065 (bridge->subordinate->subordinate >=
14066 tp->pdev->bus->number)) {
63c3a66f 14067 tg3_flag_set(tp, 5701_DMA_BUG);
41588ba1
MC
14068 pci_dev_put(bridge);
14069 break;
14070 }
14071 }
14072 }
14073
4a29cc2e
MC
14074 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
14075 * DMA addresses wider than 40 bits. This bridge may have additional
14076 * 57xx devices behind it in some 4-port NIC designs for example.
14077 * Any tg3 device found behind the bridge will also need the 40-bit
14078 * DMA workaround.
14079 */
a4e2b347
MC
14080 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14081 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
63c3a66f
JP
14082 tg3_flag_set(tp, 5780_CLASS);
14083 tg3_flag_set(tp, 40BIT_DMA_BUG);
4cf78e4f 14084 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
859a5887 14085 } else {
4a29cc2e
MC
14086 struct pci_dev *bridge = NULL;
14087
14088 do {
14089 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14090 PCI_DEVICE_ID_SERVERWORKS_EPB,
14091 bridge);
14092 if (bridge && bridge->subordinate &&
14093 (bridge->subordinate->number <=
14094 tp->pdev->bus->number) &&
14095 (bridge->subordinate->subordinate >=
14096 tp->pdev->bus->number)) {
63c3a66f 14097 tg3_flag_set(tp, 40BIT_DMA_BUG);
4a29cc2e
MC
14098 pci_dev_put(bridge);
14099 break;
14100 }
14101 } while (bridge);
14102 }
4cf78e4f 14103
f6eb9b1f 14104 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3a1e19d3 14105 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
7544b097
MC
14106 tp->pdev_peer = tg3_find_peer(tp);
14107
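/* Establish the chip family hierarchy. Each *_PLUS flag implies
 * membership of every older family set below it: 5717_PLUS and the
 * 57765 class fold into 57765_PLUS, which folds into 5755_PLUS,
 * then 5750_PLUS, then 5705_PLUS.
 */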
c885e824 14108 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
d78b59f5
MC
14109 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14110 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
63c3a66f 14111 tg3_flag_set(tp, 5717_PLUS);
0a58d668
MC
14112
14113 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
55086ad9
MC
14114 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14115 tg3_flag_set(tp, 57765_CLASS);
14116
14117 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
63c3a66f 14118 tg3_flag_set(tp, 57765_PLUS);
c885e824 14119
321d32a0
MC
14120 /* Intentionally exclude ASIC_REV_5906 */
14121 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
d9ab5ad1 14122 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
d30cdd28 14123 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9936bcf6 14124 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
57e6983c 14125 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
f6eb9b1f 14126 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
63c3a66f
JP
14127 tg3_flag(tp, 57765_PLUS))
14128 tg3_flag_set(tp, 5755_PLUS);
321d32a0
MC
14129
14130 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14131 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
b5d3772c 14132 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
63c3a66f
JP
14133 tg3_flag(tp, 5755_PLUS) ||
14134 tg3_flag(tp, 5780_CLASS))
14135 tg3_flag_set(tp, 5750_PLUS);
6708e5cc 14136
6ff6f81d 14137 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
63c3a66f
JP
14138 tg3_flag(tp, 5750_PLUS))
14139 tg3_flag_set(tp, 5705_PLUS);
1b440c56 14140
507399f1 14141 /* Determine TSO capabilities */
a0512944 14142 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
4d163b75 14143 ; /* Do nothing. HW bug. */
63c3a66f
JP
14144 else if (tg3_flag(tp, 57765_PLUS))
14145 tg3_flag_set(tp, HW_TSO_3);
14146 else if (tg3_flag(tp, 5755_PLUS) ||
e849cdc3 14147 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
63c3a66f
JP
14148 tg3_flag_set(tp, HW_TSO_2);
14149 else if (tg3_flag(tp, 5750_PLUS)) {
14150 tg3_flag_set(tp, HW_TSO_1);
14151 tg3_flag_set(tp, TSO_BUG);
507399f1
MC
14152 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14153 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
63c3a66f 14154 tg3_flag_clear(tp, TSO_BUG);
507399f1
MC
14155 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14156 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14157 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
63c3a66f 14158 tg3_flag_set(tp, TSO_BUG);
507399f1
MC
14159 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14160 tp->fw_needed = FIRMWARE_TG3TSO5;
14161 else
14162 tp->fw_needed = FIRMWARE_TG3TSO;
14163 }
14164
dabc5c67 14165 /* Selectively allow TSO based on operating conditions */
6ff6f81d
MC
14166 if (tg3_flag(tp, HW_TSO_1) ||
14167 tg3_flag(tp, HW_TSO_2) ||
14168 tg3_flag(tp, HW_TSO_3) ||
cf9ecf4b
MC
14169 tp->fw_needed) {
14170 /* For firmware TSO, assume ASF is disabled.
14171 * We'll disable TSO later if we discover ASF
14172 * is enabled in tg3_get_eeprom_hw_cfg().
14173 */
dabc5c67 14174 tg3_flag_set(tp, TSO_CAPABLE);
cf9ecf4b 14175 } else {
dabc5c67
MC
14176 tg3_flag_clear(tp, TSO_CAPABLE);
14177 tg3_flag_clear(tp, TSO_BUG);
14178 tp->fw_needed = NULL;
14179 }
14180
14181 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14182 tp->fw_needed = FIRMWARE_TG3;
14183
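/* Default to a single interrupt vector. MSI and, on 57765_PLUS
 * parts, MSI-X support below may raise this limit.
 */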
507399f1
MC
14184 tp->irq_max = 1;
14185
63c3a66f
JP
14186 if (tg3_flag(tp, 5750_PLUS)) {
14187 tg3_flag_set(tp, SUPPORT_MSI);
7544b097
MC
14188 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14189 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14190 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14191 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14192 tp->pdev_peer == tp->pdev))
63c3a66f 14193 tg3_flag_clear(tp, SUPPORT_MSI);
7544b097 14194
63c3a66f 14195 if (tg3_flag(tp, 5755_PLUS) ||
b5d3772c 14196 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
63c3a66f 14197 tg3_flag_set(tp, 1SHOT_MSI);
52c0fd83 14198 }
4f125f42 14199
63c3a66f
JP
14200 if (tg3_flag(tp, 57765_PLUS)) {
14201 tg3_flag_set(tp, SUPPORT_MSIX);
507399f1 14202 tp->irq_max = TG3_IRQ_MAX_VECS;
90415477 14203 tg3_rss_init_dflt_indir_tbl(tp);
507399f1 14204 }
f6eb9b1f 14205 }
0e1406dd 14206
2ffcc981 14207 if (tg3_flag(tp, 5755_PLUS))
63c3a66f 14208 tg3_flag_set(tp, SHORT_DMA_BUG);
f6eb9b1f 14209
e31aa987 14210 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
a4cb428d 14211 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
e31aa987 14212
fa6b2aae
MC
14213 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14214 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14215 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
63c3a66f 14216 tg3_flag_set(tp, LRG_PROD_RING_CAP);
de9f5230 14217
63c3a66f 14218 if (tg3_flag(tp, 57765_PLUS) &&
a0512944 14219 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
63c3a66f 14220 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
b703df6f 14221
63c3a66f
JP
14222 if (!tg3_flag(tp, 5705_PLUS) ||
14223 tg3_flag(tp, 5780_CLASS) ||
14224 tg3_flag(tp, USE_JUMBO_BDFLAG))
14225 tg3_flag_set(tp, JUMBO_CAPABLE);
0f893dc6 14226
52f4490c
MC
14227 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14228 &pci_state_reg);
14229
708ebb3a 14230 if (pci_is_pcie(tp->pdev)) {
5e7dfd0f
MC
14231 u16 lnkctl;
14232
63c3a66f 14233 tg3_flag_set(tp, PCI_EXPRESS);
5f5c51e3 14234
2c55a3d0
MC
14235 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
14236 int readrq = pcie_get_readrq(tp->pdev);
14237 if (readrq > 2048)
14238 pcie_set_readrq(tp->pdev, 2048);
14239 }
5f5c51e3 14240
5e7dfd0f 14241 pci_read_config_word(tp->pdev,
708ebb3a 14242 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
5e7dfd0f
MC
14243 &lnkctl);
14244 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
7196cd6c
MC
14245 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14246 ASIC_REV_5906) {
63c3a66f 14247 tg3_flag_clear(tp, HW_TSO_2);
dabc5c67 14248 tg3_flag_clear(tp, TSO_CAPABLE);
7196cd6c 14249 }
5e7dfd0f 14250 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
321d32a0 14251 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9cf74ebb
MC
14252 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14253 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
63c3a66f 14254 tg3_flag_set(tp, CLKREQ_BUG);
614b0590 14255 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
63c3a66f 14256 tg3_flag_set(tp, L1PLLPD_EN);
c7835a77 14257 }
52f4490c 14258 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
708ebb3a
JM
14259 /* BCM5785 devices are effectively PCIe devices, and should
14260 * follow PCIe codepaths, but do not have a PCIe capabilities
14261 * section.
93a700a9 14262 */
63c3a66f
JP
14263 tg3_flag_set(tp, PCI_EXPRESS);
14264 } else if (!tg3_flag(tp, 5705_PLUS) ||
14265 tg3_flag(tp, 5780_CLASS)) {
52f4490c
MC
14266 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14267 if (!tp->pcix_cap) {
2445e461
MC
14268 dev_err(&tp->pdev->dev,
14269 "Cannot find PCI-X capability, aborting\n");
52f4490c
MC
14270 return -EIO;
14271 }
14272
14273 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
63c3a66f 14274 tg3_flag_set(tp, PCIX_MODE);
52f4490c 14275 }
1da177e4 14276
399de50b
MC
14277 /* If we have an AMD 762 or VIA K8T800 chipset, write
14278 * reordering to the mailbox registers done by the host
14279 * controller can cause major trouble. We read back from
14280 * every mailbox register write to force the writes to be
14281 * posted to the chip in order.
14282 */
4143470c 14283 if (pci_dev_present(tg3_write_reorder_chipsets) &&
63c3a66f
JP
14284 !tg3_flag(tp, PCI_EXPRESS))
14285 tg3_flag_set(tp, MBOX_WRITE_REORDER);
399de50b 14286
69fc4053
MC
14287 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14288 &tp->pci_cacheline_sz);
14289 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14290 &tp->pci_lat_timer);
1da177e4
LT
14291 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14292 tp->pci_lat_timer < 64) {
14293 tp->pci_lat_timer = 64;
69fc4053
MC
14294 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14295 tp->pci_lat_timer);
1da177e4
LT
14296 }
14297
16821285
MC
14298 /* Important! -- It is critical that the PCI-X hw workaround
14299 * situation is decided before the first MMIO register access.
14300 */
52f4490c
MC
14301 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14302 /* 5700 BX chips need to have their TX producer index
14303 * mailboxes written twice to work around a bug.
14304 */
63c3a66f 14305 tg3_flag_set(tp, TXD_MBOX_HWBUG);
1da177e4 14306
52f4490c 14307 /* If we are in PCI-X mode, enable register write workaround.
1da177e4
LT
14308 *
14309 * The workaround is to use indirect register accesses
14310 * for all chip writes not to mailbox registers.
14311 */
63c3a66f 14312 if (tg3_flag(tp, PCIX_MODE)) {
1da177e4 14313 u32 pm_reg;
1da177e4 14314
63c3a66f 14315 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
1da177e4
LT
14316
14317 /* The chip can have its power management PCI config
14318 * space registers clobbered due to this bug.
14319 * So explicitly force the chip into D0 here.
14320 */
9974a356
MC
14321 pci_read_config_dword(tp->pdev,
14322 tp->pm_cap + PCI_PM_CTRL,
1da177e4
LT
14323 &pm_reg);
14324 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14325 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
9974a356
MC
14326 pci_write_config_dword(tp->pdev,
14327 tp->pm_cap + PCI_PM_CTRL,
1da177e4
LT
14328 pm_reg);
14329
14330 /* Also, force SERR#/PERR# in PCI command. */
14331 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14332 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14333 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14334 }
14335 }
14336
1da177e4 14337 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
63c3a66f 14338 tg3_flag_set(tp, PCI_HIGH_SPEED);
1da177e4 14339 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
63c3a66f 14340 tg3_flag_set(tp, PCI_32BIT);
1da177e4
LT
14341
14342 /* Chip-specific fixup from Broadcom driver */
14343 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14344 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14345 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14346 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14347 }
14348
1ee582d8 14349 /* Default fast path register access methods */
20094930 14350 tp->read32 = tg3_read32;
1ee582d8 14351 tp->write32 = tg3_write32;
09ee929c 14352 tp->read32_mbox = tg3_read32;
20094930 14353 tp->write32_mbox = tg3_write32;
1ee582d8
MC
14354 tp->write32_tx_mbox = tg3_write32;
14355 tp->write32_rx_mbox = tg3_write32;
14356
14357 /* Various workaround register access methods */
63c3a66f 14358 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
1ee582d8 14359 tp->write32 = tg3_write_indirect_reg32;
98efd8a6 14360 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
63c3a66f 14361 (tg3_flag(tp, PCI_EXPRESS) &&
98efd8a6
MC
14362 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14363 /*
14364 * Back-to-back register writes can cause problems on these
14365 * chips; the workaround is to read back all reg writes
14366 * except those to mailbox regs.
14367 *
14368 * See tg3_write_indirect_reg32().
14369 */
1ee582d8 14370 tp->write32 = tg3_write_flush_reg32;
98efd8a6
MC
14371 }
14372
63c3a66f 14373 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
1ee582d8 14374 tp->write32_tx_mbox = tg3_write32_tx_mbox;
63c3a66f 14375 if (tg3_flag(tp, MBOX_WRITE_REORDER))
1ee582d8
MC
14376 tp->write32_rx_mbox = tg3_write_flush_reg32;
14377 }
20094930 14378
63c3a66f 14379 if (tg3_flag(tp, ICH_WORKAROUND)) {
6892914f
MC
14380 tp->read32 = tg3_read_indirect_reg32;
14381 tp->write32 = tg3_write_indirect_reg32;
14382 tp->read32_mbox = tg3_read_indirect_mbox;
14383 tp->write32_mbox = tg3_write_indirect_mbox;
14384 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14385 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14386
14387 iounmap(tp->regs);
22abe310 14388 tp->regs = NULL;
6892914f
MC
14389
14390 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14391 pci_cmd &= ~PCI_COMMAND_MEMORY;
14392 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14393 }
b5d3772c
MC
14394 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14395 tp->read32_mbox = tg3_read32_mbox_5906;
14396 tp->write32_mbox = tg3_write32_mbox_5906;
14397 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14398 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14399 }
6892914f 14400
bbadf503 14401 if (tp->write32 == tg3_write_indirect_reg32 ||
63c3a66f 14402 (tg3_flag(tp, PCIX_MODE) &&
bbadf503 14403 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
f49639e6 14404 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
63c3a66f 14405 tg3_flag_set(tp, SRAM_USE_CONFIG);
bbadf503 14406
16821285
MC
14407 /* The memory arbiter has to be enabled in order for SRAM accesses
14408 * to succeed. Normally on powerup the tg3 chip firmware will make
14409 * sure it is enabled, but other entities such as system netboot
14410 * code might disable it.
14411 */
14412 val = tr32(MEMARB_MODE);
14413 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14414
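/* Work out which PCI function this port is. On some chips the
 * devfn value alone is not enough, so the function number is taken
 * from PCI-X status or from the CPMU status word in SRAM instead.
 */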
9dc5e342
MC
14415 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14416 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14417 tg3_flag(tp, 5780_CLASS)) {
14418 if (tg3_flag(tp, PCIX_MODE)) {
14419 pci_read_config_dword(tp->pdev,
14420 tp->pcix_cap + PCI_X_STATUS,
14421 &val);
14422 tp->pci_fn = val & 0x7;
14423 }
14424 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14425 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14426 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14427 NIC_SRAM_CPMUSTAT_SIG) {
14428 tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14429 tp->pci_fn = tp->pci_fn ? 1 : 0;
14430 }
14431 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14432 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14433 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14434 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14435 NIC_SRAM_CPMUSTAT_SIG) {
14436 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14437 TG3_CPMU_STATUS_FSHFT_5719;
14438 }
69f11c99
MC
14439 }
14440
7d0c41ef 14441 /* Get eeprom hw config before calling tg3_set_power_state().
63c3a66f 14442 * In particular, the TG3_FLAG_IS_NIC flag must be
7d0c41ef
MC
14443 * determined before calling tg3_set_power_state() so that
14444 * we know whether or not to switch out of Vaux power.
14445 * When the flag is set, it means that GPIO1 is used for eeprom
14446 * write protect and also implies that it is a LOM where GPIOs
14447 * are not used to switch power.
6aa20a22 14448 */
7d0c41ef
MC
14449 tg3_get_eeprom_hw_cfg(tp);
14450
cf9ecf4b
MC
14451 if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14452 tg3_flag_clear(tp, TSO_CAPABLE);
14453 tg3_flag_clear(tp, TSO_BUG);
14454 tp->fw_needed = NULL;
14455 }
14456
63c3a66f 14457 if (tg3_flag(tp, ENABLE_APE)) {
0d3031d9
MC
14458 /* Allow reads and writes to the
14459 * APE register and memory space.
14460 */
14461 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
f92d9dc1
MC
14462 PCISTATE_ALLOW_APE_SHMEM_WR |
14463 PCISTATE_ALLOW_APE_PSPACE_WR;
0d3031d9
MC
14464 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14465 pci_state_reg);
c9cab24e
MC
14466
14467 tg3_ape_lock_init(tp);
0d3031d9
MC
14468 }
14469
9936bcf6 14470 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
57e6983c 14471 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
321d32a0 14472 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
f6eb9b1f 14473 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
63c3a66f
JP
14474 tg3_flag(tp, 57765_PLUS))
14475 tg3_flag_set(tp, CPMU_PRESENT);
d30cdd28 14476
16821285
MC
14477 /* Set up tp->grc_local_ctrl before calling
14478 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
14479 * will bring 5700's external PHY out of reset.
314fba34
MC
14480 * It is also used as eeprom write protect on LOMs.
14481 */
14482 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
6ff6f81d 14483 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
63c3a66f 14484 tg3_flag(tp, EEPROM_WRITE_PROT))
314fba34
MC
14485 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14486 GRC_LCLCTRL_GPIO_OUTPUT1);
3e7d83bc
MC
14487 /* Unused GPIO3 must be driven as output on 5752 because there
14488 * are no pull-up resistors on unused GPIO pins.
14489 */
14490 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14491 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
314fba34 14492
321d32a0 14493 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
cb4ed1fd 14494 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
55086ad9 14495 tg3_flag(tp, 57765_CLASS))
af36e6b6
MC
14496 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14497
8d519ab2
MC
14498 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14499 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
5f0c4a3c
MC
14500 /* Turn off the debug UART. */
14501 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
63c3a66f 14502 if (tg3_flag(tp, IS_NIC))
5f0c4a3c
MC
14503 /* Keep VMain power. */
14504 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14505 GRC_LCLCTRL_GPIO_OUTPUT0;
14506 }
14507
16821285
MC
14508 /* Switch out of Vaux if it is a NIC */
14509 tg3_pwrsrc_switch_to_vmain(tp);
1da177e4 14510
1da177e4
LT
14511 /* Derive initial jumbo mode from MTU assigned in
14512 * ether_setup() via the alloc_etherdev() call
14513 */
63c3a66f
JP
14514 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14515 tg3_flag_set(tp, JUMBO_RING_ENABLE);
1da177e4
LT
14516
14517 /* Determine WakeOnLan speed to use. */
14518 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14519 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14520 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14521 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
63c3a66f 14522 tg3_flag_clear(tp, WOL_SPEED_100MB);
1da177e4 14523 } else {
63c3a66f 14524 tg3_flag_set(tp, WOL_SPEED_100MB);
1da177e4
LT
14525 }
14526
7f97a4bd 14527 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
f07e9af3 14528 tp->phy_flags |= TG3_PHYFLG_IS_FET;
7f97a4bd 14529
1da177e4 14530 /* A few boards don't want Ethernet@WireSpeed phy feature */
6ff6f81d
MC
14531 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14532 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
1da177e4 14533 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
747e8f8b 14534 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
f07e9af3
MC
14535 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14536 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14537 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
1da177e4
LT
14538
14539 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14540 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
f07e9af3 14541 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
1da177e4 14542 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
f07e9af3 14543 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
1da177e4 14544
63c3a66f 14545 if (tg3_flag(tp, 5705_PLUS) &&
f07e9af3 14546 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
321d32a0 14547 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
f6eb9b1f 14548 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
63c3a66f 14549 !tg3_flag(tp, 57765_PLUS)) {
c424cb24 14550 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
d30cdd28 14551 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9936bcf6
MC
14552 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14553 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
d4011ada
MC
14554 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14555 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
f07e9af3 14556 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
c1d2a196 14557 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
f07e9af3 14558 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
321d32a0 14559 } else
f07e9af3 14560 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
c424cb24 14561 }
1da177e4 14562
b2a5c19c
MC
14563 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14564 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14565 tp->phy_otp = tg3_read_otp_phycfg(tp);
14566 if (tp->phy_otp == 0)
14567 tp->phy_otp = TG3_OTP_DEFAULT;
14568 }
14569
63c3a66f 14570 if (tg3_flag(tp, CPMU_PRESENT))
8ef21428
MC
14571 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14572 else
14573 tp->mi_mode = MAC_MI_MODE_BASE;
14574
1da177e4 14575 tp->coalesce_mode = 0;
1da177e4
LT
14576 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14577 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14578 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14579
4d958473
MC
14580 /* Set these bits to enable the statistics workaround. */
14581 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14582 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14583 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14584 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14585 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14586 }
14587
321d32a0
MC
14588 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14589 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
63c3a66f 14590 tg3_flag_set(tp, USE_PHYLIB);
57e6983c 14591
158d7abd
MC
14592 err = tg3_mdio_init(tp);
14593 if (err)
14594 return err;
1da177e4
LT
14595
14596 /* Initialize data/descriptor byte/word swapping. */
14597 val = tr32(GRC_MODE);
f2096f94
MC
14598 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14599 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14600 GRC_MODE_WORD_SWAP_B2HRX_DATA |
14601 GRC_MODE_B2HRX_ENABLE |
14602 GRC_MODE_HTX2B_ENABLE |
14603 GRC_MODE_HOST_STACKUP);
14604 else
14605 val &= GRC_MODE_HOST_STACKUP;
14606
1da177e4
LT
14607 tw32(GRC_MODE, val | tp->grc_mode);
14608
14609 tg3_switch_clocks(tp);
14610
14611 /* Clear this out for sanity. */
14612 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14613
14614 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14615 &pci_state_reg);
14616 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
63c3a66f 14617 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
1da177e4
LT
14618 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14619
14620 if (chiprevid == CHIPREV_ID_5701_A0 ||
14621 chiprevid == CHIPREV_ID_5701_B0 ||
14622 chiprevid == CHIPREV_ID_5701_B2 ||
14623 chiprevid == CHIPREV_ID_5701_B5) {
14624 void __iomem *sram_base;
14625
14626 /* Write some dummy words into the SRAM status block
14627 * area and see if it reads back correctly. If the return
14628 * value is bad, force-enable the PCIX workaround.
14629 */
14630 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14631
14632 writel(0x00000000, sram_base);
14633 writel(0x00000000, sram_base + 4);
14634 writel(0xffffffff, sram_base + 4);
14635 if (readl(sram_base) != 0x00000000)
63c3a66f 14636 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
1da177e4
LT
14637 }
14638 }
14639
14640 udelay(50);
14641 tg3_nvram_init(tp);
14642
14643 grc_misc_cfg = tr32(GRC_MISC_CFG);
14644 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14645
1da177e4
LT
14646 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14647 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14648 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
63c3a66f 14649 tg3_flag_set(tp, IS_5788);
1da177e4 14650
63c3a66f 14651 if (!tg3_flag(tp, IS_5788) &&
6ff6f81d 14652 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
63c3a66f
JP
14653 tg3_flag_set(tp, TAGGED_STATUS);
14654 if (tg3_flag(tp, TAGGED_STATUS)) {
fac9b83e
DM
14655 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14656 HOSTCC_MODE_CLRTICK_TXBD);
14657
14658 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14659 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14660 tp->misc_host_ctrl);
14661 }
14662
3bda1258 14663 /* Preserve the APE MAC_MODE bits */
63c3a66f 14664 if (tg3_flag(tp, ENABLE_APE))
d2394e6b 14665 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
3bda1258 14666 else
6e01b20b 14667 tp->mac_mode = 0;
3bda1258 14668
1da177e4
LT
14669 /* these are limited to 10/100 only */
14670 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14671 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14672 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14673 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14674 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14675 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14676 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14677 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14678 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
676917d4
MC
14679 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14680 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
321d32a0 14681 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
d1101142
MC
14682 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14683 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
f07e9af3
MC
14684 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14685 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
1da177e4
LT
14686
14687 err = tg3_phy_probe(tp);
14688 if (err) {
2445e461 14689 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
1da177e4 14690 /* ... but do not return immediately ... */
b02fd9e3 14691 tg3_mdio_fini(tp);
1da177e4
LT
14692 }
14693
184b8904 14694 tg3_read_vpd(tp);
c4e6575c 14695 tg3_read_fw_ver(tp);
1da177e4 14696
f07e9af3
MC
14697 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14698 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
1da177e4
LT
14699 } else {
14700 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
f07e9af3 14701 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
1da177e4 14702 else
f07e9af3 14703 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
1da177e4
LT
14704 }
14705
14706 /* 5700 {AX,BX} chips have a broken status block link
14707 * change bit implementation, so we must use the
14708 * status register in those cases.
14709 */
14710 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
63c3a66f 14711 tg3_flag_set(tp, USE_LINKCHG_REG);
1da177e4 14712 else
63c3a66f 14713 tg3_flag_clear(tp, USE_LINKCHG_REG);
1da177e4
LT
14714
14715 /* The led_ctrl is set during tg3_phy_probe; here we might
14716 * have to force the link status polling mechanism based
14717 * upon subsystem IDs.
14718 */
14719 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
007a880d 14720 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
f07e9af3
MC
14721 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14722 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
63c3a66f 14723 tg3_flag_set(tp, USE_LINKCHG_REG);
1da177e4
LT
14724 }
14725
14726 /* For all SERDES we poll the MAC status register. */
f07e9af3 14727 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
63c3a66f 14728 tg3_flag_set(tp, POLL_SERDES);
1da177e4 14729 else
63c3a66f 14730 tg3_flag_clear(tp, POLL_SERDES);
1da177e4 14731
9205fd9c 14732 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
d2757fc4 14733 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
1da177e4 14734 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
63c3a66f 14735 tg3_flag(tp, PCIX_MODE)) {
9205fd9c 14736 tp->rx_offset = NET_SKB_PAD;
d2757fc4 14737#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
9dc7a113 14738 tp->rx_copy_thresh = ~(u16)0;
d2757fc4
MC
14739#endif
14740 }
1da177e4 14741
2c49a44d
MC
14742 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14743 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
7cb32cf2
MC
14744 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14745
2c49a44d 14746 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
f92905de
MC
14747
14748 /* Increment the rx prod index on the rx std ring by at most
14749 * 8 for these chips to work around hw errata.
14750 */
14751 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14752 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14753 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14754 tp->rx_std_max_post = 8;
14755
63c3a66f 14756 if (tg3_flag(tp, ASPM_WORKAROUND))
8ed5d97e
MC
14757 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14758 PCIE_PWR_MGMT_L1_THRESH_MSK;
14759
1da177e4
LT
14760 return err;
14761}
14762
49b6e95f 14763#ifdef CONFIG_SPARC
1da177e4
LT
14764static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14765{
14766 struct net_device *dev = tp->dev;
14767 struct pci_dev *pdev = tp->pdev;
49b6e95f 14768 struct device_node *dp = pci_device_to_OF_node(pdev);
374d4cac 14769 const unsigned char *addr;
49b6e95f
DM
14770 int len;
14771
14772 addr = of_get_property(dp, "local-mac-address", &len);
14773 if (addr && len == 6) {
14774 memcpy(dev->dev_addr, addr, 6);
14775 memcpy(dev->perm_addr, dev->dev_addr, 6);
14776 return 0;
1da177e4
LT
14777 }
14778 return -ENODEV;
14779}
14780
14781static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14782{
14783 struct net_device *dev = tp->dev;
14784
14785 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
2ff43697 14786 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
1da177e4
LT
14787 return 0;
14788}
14789#endif
14790
14791static int __devinit tg3_get_device_address(struct tg3 *tp)
14792{
14793 struct net_device *dev = tp->dev;
14794 u32 hi, lo, mac_offset;
008652b3 14795 int addr_ok = 0;
1da177e4 14796
49b6e95f 14797#ifdef CONFIG_SPARC
1da177e4
LT
14798 if (!tg3_get_macaddr_sparc(tp))
14799 return 0;
14800#endif
14801
14802 mac_offset = 0x7c;
6ff6f81d 14803 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
63c3a66f 14804 tg3_flag(tp, 5780_CLASS)) {
1da177e4
LT
14805 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14806 mac_offset = 0xcc;
14807 if (tg3_nvram_lock(tp))
14808 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14809 else
14810 tg3_nvram_unlock(tp);
63c3a66f 14811 } else if (tg3_flag(tp, 5717_PLUS)) {
69f11c99 14812 if (tp->pci_fn & 1)
a1b950d5 14813 mac_offset = 0xcc;
69f11c99 14814 if (tp->pci_fn > 1)
a50d0796 14815 mac_offset += 0x18c;
a1b950d5 14816 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
b5d3772c 14817 mac_offset = 0x10;
1da177e4
LT
14818
14819 /* First try to get it from MAC address mailbox. */
14820 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
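/* A leading 0x484b (ASCII "HK") indicates that bootcode has stored
 * a valid MAC address in the mailbox.
 */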
14821 if ((hi >> 16) == 0x484b) {
14822 dev->dev_addr[0] = (hi >> 8) & 0xff;
14823 dev->dev_addr[1] = (hi >> 0) & 0xff;
14824
14825 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14826 dev->dev_addr[2] = (lo >> 24) & 0xff;
14827 dev->dev_addr[3] = (lo >> 16) & 0xff;
14828 dev->dev_addr[4] = (lo >> 8) & 0xff;
14829 dev->dev_addr[5] = (lo >> 0) & 0xff;
1da177e4 14830
008652b3
MC
14831 /* Some old bootcode may report a 0 MAC address in SRAM */
14832 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14833 }
14834 if (!addr_ok) {
14835 /* Next, try NVRAM. */
63c3a66f 14836 if (!tg3_flag(tp, NO_NVRAM) &&
df259d8c 14837 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
6d348f2c 14838 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
62cedd11
MC
14839 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14840 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
008652b3
MC
14841 }
14842 /* Finally just fetch it out of the MAC control regs. */
14843 else {
14844 hi = tr32(MAC_ADDR_0_HIGH);
14845 lo = tr32(MAC_ADDR_0_LOW);
14846
14847 dev->dev_addr[5] = lo & 0xff;
14848 dev->dev_addr[4] = (lo >> 8) & 0xff;
14849 dev->dev_addr[3] = (lo >> 16) & 0xff;
14850 dev->dev_addr[2] = (lo >> 24) & 0xff;
14851 dev->dev_addr[1] = hi & 0xff;
14852 dev->dev_addr[0] = (hi >> 8) & 0xff;
14853 }
1da177e4
LT
14854 }
14855
14856 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
7582a335 14857#ifdef CONFIG_SPARC
1da177e4
LT
14858 if (!tg3_get_default_macaddr_sparc(tp))
14859 return 0;
14860#endif
14861 return -EINVAL;
14862 }
2ff43697 14863 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
1da177e4
LT
14864 return 0;
14865}
14866
59e6b434
DM
14867#define BOUNDARY_SINGLE_CACHELINE 1
14868#define BOUNDARY_MULTI_CACHELINE 2
14869
14870static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14871{
14872 int cacheline_size;
14873 u8 byte;
14874 int goal;
14875
14876 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14877 if (byte == 0)
14878 cacheline_size = 1024;
14879 else
14880 cacheline_size = (int) byte * 4;
14881
14882 /* On 5703 and later chips, the boundary bits have no
14883 * effect.
14884 */
14885 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14886 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
63c3a66f 14887 !tg3_flag(tp, PCI_EXPRESS))
59e6b434
DM
14888 goto out;
14889
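/* Pick a per-architecture boundary goal; the comment further down
 * explains why bursting across cacheline boundaries hurts on many
 * RISC hosts.
 */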
14890#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14891 goal = BOUNDARY_MULTI_CACHELINE;
14892#else
14893#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14894 goal = BOUNDARY_SINGLE_CACHELINE;
14895#else
14896 goal = 0;
14897#endif
14898#endif
14899
63c3a66f 14900 if (tg3_flag(tp, 57765_PLUS)) {
cbf9ca6c
MC
14901 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14902 goto out;
14903 }
14904
59e6b434
DM
14905 if (!goal)
14906 goto out;
14907
14908 /* PCI controllers on most RISC systems tend to disconnect
14909 * when a device tries to burst across a cache-line boundary.
14910 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14911 *
14912 * Unfortunately, for PCI-E there are only limited
14913 * write-side controls for this, and thus for reads
14914 * we will still get the disconnects. We'll also waste
14915 * these PCI cycles for both read and write for chips
14916 * other than 5700 and 5701 which do not implement the
14917 * boundary bits.
14918 */
63c3a66f 14919 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
59e6b434
DM
14920 switch (cacheline_size) {
14921 case 16:
14922 case 32:
14923 case 64:
14924 case 128:
14925 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14926 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14927 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14928 } else {
14929 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14930 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14931 }
14932 break;
14933
14934 case 256:
14935 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14936 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14937 break;
14938
14939 default:
14940 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14941 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14942 break;
855e1111 14943 }
63c3a66f 14944 } else if (tg3_flag(tp, PCI_EXPRESS)) {
59e6b434
DM
14945 switch (cacheline_size) {
14946 case 16:
14947 case 32:
14948 case 64:
14949 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14950 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14951 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14952 break;
14953 }
14954 /* fallthrough */
14955 case 128:
14956 default:
14957 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14958 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14959 break;
855e1111 14960 }
59e6b434
DM
14961 } else {
14962 switch (cacheline_size) {
14963 case 16:
14964 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14965 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14966 DMA_RWCTRL_WRITE_BNDRY_16);
14967 break;
14968 }
14969 /* fallthrough */
14970 case 32:
14971 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14972 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14973 DMA_RWCTRL_WRITE_BNDRY_32);
14974 break;
14975 }
14976 /* fallthrough */
14977 case 64:
14978 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14979 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14980 DMA_RWCTRL_WRITE_BNDRY_64);
14981 break;
14982 }
14983 /* fallthrough */
14984 case 128:
14985 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14986 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14987 DMA_RWCTRL_WRITE_BNDRY_128);
14988 break;
14989 }
14990 /* fallthrough */
14991 case 256:
14992 val |= (DMA_RWCTRL_READ_BNDRY_256 |
14993 DMA_RWCTRL_WRITE_BNDRY_256);
14994 break;
14995 case 512:
14996 val |= (DMA_RWCTRL_READ_BNDRY_512 |
14997 DMA_RWCTRL_WRITE_BNDRY_512);
14998 break;
14999 case 1024:
15000 default:
15001 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
15002 DMA_RWCTRL_WRITE_BNDRY_1024);
15003 break;
855e1111 15004 }
59e6b434
DM
15005 }
15006
15007out:
15008 return val;
15009}
15010
1da177e4
LT
15011static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
15012{
15013 struct tg3_internal_buffer_desc test_desc;
15014 u32 sram_dma_descs;
15015 int i, ret;
15016
15017 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
15018
15019 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
15020 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
15021 tw32(RDMAC_STATUS, 0);
15022 tw32(WDMAC_STATUS, 0);
15023
15024 tw32(BUFMGR_MODE, 0);
15025 tw32(FTQ_RESET, 0);
15026
15027 test_desc.addr_hi = ((u64) buf_dma) >> 32;
15028 test_desc.addr_lo = buf_dma & 0xffffffff;
15029 test_desc.nic_mbuf = 0x00002100;
15030 test_desc.len = size;
15031
15032 /*
15033 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
15034 * the *second* time the tg3 driver was getting loaded after an
15035 * initial scan.
15036 *
15037 * Broadcom tells me:
15038 * ...the DMA engine is connected to the GRC block and a DMA
15039 * reset may affect the GRC block in some unpredictable way...
15040 * The behavior of resets to individual blocks has not been tested.
15041 *
15042 * Broadcom noted the GRC reset will also reset all sub-components.
15043 */
15044 if (to_device) {
15045 test_desc.cqid_sqid = (13 << 8) | 2;
15046
15047 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
15048 udelay(40);
15049 } else {
15050 test_desc.cqid_sqid = (16 << 8) | 7;
15051
15052 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
15053 udelay(40);
15054 }
15055 test_desc.flags = 0x00000005;
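/* Copy the descriptor into NIC SRAM a word at a time through the
 * PCI memory window, then point the window back at offset 0.
 */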
15056
15057 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
15058 u32 val;
15059
15060 val = *(((u32 *)&test_desc) + i);
15061 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
15062 sram_dma_descs + (i * sizeof(u32)));
15063 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
15064 }
15065 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
15066
859a5887 15067 if (to_device)
1da177e4 15068 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
859a5887 15069 else
1da177e4 15070 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
1da177e4
LT
15071
15072 ret = -ENODEV;
15073 for (i = 0; i < 40; i++) {
15074 u32 val;
15075
15076 if (to_device)
15077 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15078 else
15079 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15080 if ((val & 0xffff) == sram_dma_descs) {
15081 ret = 0;
15082 break;
15083 }
15084
15085 udelay(100);
15086 }
15087
15088 return ret;
15089}
15090
ded7340d 15091#define TEST_BUFFER_SIZE 0x2000
1da177e4 15092
4143470c 15093static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
895950c2
JP
15094 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
15095 { },
15096};
15097
1da177e4
LT
15098static int __devinit tg3_test_dma(struct tg3 *tp)
15099{
15100 dma_addr_t buf_dma;
59e6b434 15101 u32 *buf, saved_dma_rwctrl;
cbf9ca6c 15102 int ret = 0;
1da177e4 15103
4bae65c8
MC
15104 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
15105 &buf_dma, GFP_KERNEL);
1da177e4
LT
15106 if (!buf) {
15107 ret = -ENOMEM;
15108 goto out_nofree;
15109 }
15110
15111 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
15112 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
15113
59e6b434 15114 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
1da177e4 15115
63c3a66f 15116 if (tg3_flag(tp, 57765_PLUS))
cbf9ca6c
MC
15117 goto out;
15118
63c3a66f 15119 if (tg3_flag(tp, PCI_EXPRESS)) {
1da177e4
LT
15120 /* DMA read watermark not used on PCIE */
15121 tp->dma_rwctrl |= 0x00180000;
63c3a66f 15122 } else if (!tg3_flag(tp, PCIX_MODE)) {
85e94ced
MC
15123 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
15124 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
1da177e4
LT
15125 tp->dma_rwctrl |= 0x003f0000;
15126 else
15127 tp->dma_rwctrl |= 0x003f000f;
15128 } else {
15129 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15130 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
15131 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
49afdeb6 15132 u32 read_water = 0x7;
1da177e4 15133
4a29cc2e
MC
15134 /* If the 5704 is behind the EPB bridge, we can
15135 * do the less restrictive ONE_DMA workaround for
15136 * better performance.
15137 */
63c3a66f 15138 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
4a29cc2e
MC
15139 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15140 tp->dma_rwctrl |= 0x8000;
15141 else if (ccval == 0x6 || ccval == 0x7)
1da177e4
LT
15142 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
15143
49afdeb6
MC
15144 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
15145 read_water = 4;
59e6b434 15146 /* Set bit 23 to enable PCIX hw bug fix */
49afdeb6
MC
15147 tp->dma_rwctrl |=
15148 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
15149 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
15150 (1 << 23);
4cf78e4f
MC
15151 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
15152 /* 5780 always in PCIX mode */
15153 tp->dma_rwctrl |= 0x00144000;
a4e2b347
MC
15154 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
15155 /* 5714 always in PCIX mode */
15156 tp->dma_rwctrl |= 0x00148000;
1da177e4
LT
15157 } else {
15158 tp->dma_rwctrl |= 0x001b000f;
15159 }
15160 }
15161
15162 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15163 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15164 tp->dma_rwctrl &= 0xfffffff0;
15165
15166 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15167 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15168 /* Remove this if it causes problems for some boards. */
15169 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
15170
15171 /* On 5700/5701 chips, we need to set this bit.
15172 * Otherwise the chip will issue cacheline transactions
15173 * to streamable DMA memory without all of the byte
15174 * enables asserted. This is an error on several
15175 * RISC PCI controllers, in particular sparc64.
15176 *
15177 * On 5703/5704 chips, this bit has been reassigned
15178 * a different meaning. In particular, it is used
15179 * on those chips to enable a PCI-X workaround.
15180 */
15181 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
15182 }
15183
15184 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15185
15186#if 0
15187 /* Unneeded, already done by tg3_get_invariants. */
15188 tg3_switch_clocks(tp);
15189#endif
15190
1da177e4
LT
15191 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15192 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
15193 goto out;
15194
59e6b434
DM
15195 /* It is best to perform the DMA test with the maximum write burst
15196 * to expose the 5700/5701 write DMA bug.
15197 */
15198 saved_dma_rwctrl = tp->dma_rwctrl;
15199 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15200 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15201
1da177e4
LT
15202 while (1) {
15203 u32 *p = buf, i;
15204
15205 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15206 p[i] = i;
15207
15208 /* Send the buffer to the chip. */
15209 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15210 if (ret) {
2445e461
MC
15211 dev_err(&tp->pdev->dev,
15212 "%s: Buffer write failed. err = %d\n",
15213 __func__, ret);
1da177e4
LT
15214 break;
15215 }
15216
15217#if 0
15218 /* validate data reached card RAM correctly. */
15219 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15220 u32 val;
15221 tg3_read_mem(tp, 0x2100 + (i*4), &val);
15222 if (le32_to_cpu(val) != p[i]) {
2445e461
MC
15223 dev_err(&tp->pdev->dev,
15224 "%s: Buffer corrupted on device! "
15225 "(%d != %d)\n", __func__, val, i);
1da177e4
LT
15226 /* ret = -ENODEV here? */
15227 }
15228 p[i] = 0;
15229 }
15230#endif
15231 /* Now read it back. */
15232 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15233 if (ret) {
5129c3a3
MC
15234 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15235 "err = %d\n", __func__, ret);
1da177e4
LT
15236 break;
15237 }
15238
15239 /* Verify it. */
15240 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15241 if (p[i] == i)
15242 continue;
15243
59e6b434
DM
15244 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15245 DMA_RWCTRL_WRITE_BNDRY_16) {
15246 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
1da177e4
LT
15247 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15248 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15249 break;
15250 } else {
2445e461
MC
15251 dev_err(&tp->pdev->dev,
15252 "%s: Buffer corrupted on read back! "
15253 "(%d != %d)\n", __func__, p[i], i);
1da177e4
LT
15254 ret = -ENODEV;
15255 goto out;
15256 }
15257 }
15258
15259 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15260 /* Success. */
15261 ret = 0;
15262 break;
15263 }
15264 }
59e6b434
DM
15265 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15266 DMA_RWCTRL_WRITE_BNDRY_16) {
15267 /* DMA test passed without adjusting the DMA boundary;
6d1cfbab
MC
15268 * now look for chipsets that are known to expose the
15269 * DMA bug without failing the test.
59e6b434 15270 */
4143470c 15271 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
6d1cfbab
MC
15272 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15273 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
859a5887 15274 } else {
6d1cfbab
MC
15275 /* Safe to use the calculated DMA boundary. */
15276 tp->dma_rwctrl = saved_dma_rwctrl;
859a5887 15277 }
6d1cfbab 15278
59e6b434
DM
15279 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15280 }
1da177e4
LT
15281
15282out:
4bae65c8 15283 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
1da177e4
LT
15284out_nofree:
15285 return ret;
15286}
15287
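/* Buffer manager watermarks vary by chip family; the *_jumbo
 * variants are applied when jumbo rings are enabled.
 */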
1da177e4
LT
15288static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15289{
63c3a66f 15290 if (tg3_flag(tp, 57765_PLUS)) {
666bc831
MC
15291 tp->bufmgr_config.mbuf_read_dma_low_water =
15292 DEFAULT_MB_RDMA_LOW_WATER_5705;
15293 tp->bufmgr_config.mbuf_mac_rx_low_water =
15294 DEFAULT_MB_MACRX_LOW_WATER_57765;
15295 tp->bufmgr_config.mbuf_high_water =
15296 DEFAULT_MB_HIGH_WATER_57765;
15297
15298 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15299 DEFAULT_MB_RDMA_LOW_WATER_5705;
15300 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15301 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15302 tp->bufmgr_config.mbuf_high_water_jumbo =
15303 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
63c3a66f 15304 } else if (tg3_flag(tp, 5705_PLUS)) {
fdfec172
MC
15305 tp->bufmgr_config.mbuf_read_dma_low_water =
15306 DEFAULT_MB_RDMA_LOW_WATER_5705;
15307 tp->bufmgr_config.mbuf_mac_rx_low_water =
15308 DEFAULT_MB_MACRX_LOW_WATER_5705;
15309 tp->bufmgr_config.mbuf_high_water =
15310 DEFAULT_MB_HIGH_WATER_5705;
b5d3772c
MC
15311 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15312 tp->bufmgr_config.mbuf_mac_rx_low_water =
15313 DEFAULT_MB_MACRX_LOW_WATER_5906;
15314 tp->bufmgr_config.mbuf_high_water =
15315 DEFAULT_MB_HIGH_WATER_5906;
15316 }
fdfec172
MC
15317
15318 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15319 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15320 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15321 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15322 tp->bufmgr_config.mbuf_high_water_jumbo =
15323 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15324 } else {
15325 tp->bufmgr_config.mbuf_read_dma_low_water =
15326 DEFAULT_MB_RDMA_LOW_WATER;
15327 tp->bufmgr_config.mbuf_mac_rx_low_water =
15328 DEFAULT_MB_MACRX_LOW_WATER;
15329 tp->bufmgr_config.mbuf_high_water =
15330 DEFAULT_MB_HIGH_WATER;
15331
15332 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15333 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15334 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15335 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15336 tp->bufmgr_config.mbuf_high_water_jumbo =
15337 DEFAULT_MB_HIGH_WATER_JUMBO;
15338 }
1da177e4
LT
15339
15340 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15341 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15342}
15343
15344static char * __devinit tg3_phy_string(struct tg3 *tp)
15345{
79eb6904
MC
15346 switch (tp->phy_id & TG3_PHY_ID_MASK) {
15347 case TG3_PHY_ID_BCM5400: return "5400";
15348 case TG3_PHY_ID_BCM5401: return "5401";
15349 case TG3_PHY_ID_BCM5411: return "5411";
15350 case TG3_PHY_ID_BCM5701: return "5701";
15351 case TG3_PHY_ID_BCM5703: return "5703";
15352 case TG3_PHY_ID_BCM5704: return "5704";
15353 case TG3_PHY_ID_BCM5705: return "5705";
15354 case TG3_PHY_ID_BCM5750: return "5750";
15355 case TG3_PHY_ID_BCM5752: return "5752";
15356 case TG3_PHY_ID_BCM5714: return "5714";
15357 case TG3_PHY_ID_BCM5780: return "5780";
15358 case TG3_PHY_ID_BCM5755: return "5755";
15359 case TG3_PHY_ID_BCM5787: return "5787";
15360 case TG3_PHY_ID_BCM5784: return "5784";
15361 case TG3_PHY_ID_BCM5756: return "5722/5756";
15362 case TG3_PHY_ID_BCM5906: return "5906";
15363 case TG3_PHY_ID_BCM5761: return "5761";
15364 case TG3_PHY_ID_BCM5718C: return "5718C";
15365 case TG3_PHY_ID_BCM5718S: return "5718S";
15366 case TG3_PHY_ID_BCM57765: return "57765";
302b500b 15367 case TG3_PHY_ID_BCM5719C: return "5719C";
6418f2c1 15368 case TG3_PHY_ID_BCM5720C: return "5720C";
79eb6904 15369 case TG3_PHY_ID_BCM8002: return "8002/serdes";
1da177e4
LT
15370 case 0: return "serdes";
15371 default: return "unknown";
855e1111 15372 }
1da177e4
LT
15373}
15374
f9804ddb
MC
15375static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15376{
63c3a66f 15377 if (tg3_flag(tp, PCI_EXPRESS)) {
f9804ddb
MC
15378 strcpy(str, "PCI Express");
15379 return str;
63c3a66f 15380 } else if (tg3_flag(tp, PCIX_MODE)) {
f9804ddb
MC
15381 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15382
15383 strcpy(str, "PCIX:");
15384
15385 if ((clock_ctrl == 7) ||
15386 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15387 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15388 strcat(str, "133MHz");
15389 else if (clock_ctrl == 0)
15390 strcat(str, "33MHz");
15391 else if (clock_ctrl == 2)
15392 strcat(str, "50MHz");
15393 else if (clock_ctrl == 4)
15394 strcat(str, "66MHz");
15395 else if (clock_ctrl == 6)
15396 strcat(str, "100MHz");
f9804ddb
MC
15397 } else {
15398 strcpy(str, "PCI:");
63c3a66f 15399 if (tg3_flag(tp, PCI_HIGH_SPEED))
f9804ddb
MC
15400 strcat(str, "66MHz");
15401 else
15402 strcat(str, "33MHz");
15403 }
63c3a66f 15404 if (tg3_flag(tp, PCI_32BIT))
f9804ddb
MC
15405 strcat(str, ":32-bit");
15406 else
15407 strcat(str, ":64-bit");
15408 return str;
15409}
15410
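/* Seed the default ethtool coalescing parameters. Chips that clear
 * the coalescing ticks on BD events get the *_CLRTCKS variants, and
 * the IRQ and statistics knobs are zeroed on 5705+ parts.
 */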
static void __devinit tg3_init_coal(struct tg3 *tp)
{
        struct ethtool_coalesce *ec = &tp->coal;

        memset(ec, 0, sizeof(*ec));
        ec->cmd = ETHTOOL_GCOALESCE;
        ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
        ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
        ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
        ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
        ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
        ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
        ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
        ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
        ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

        if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
                                 HOSTCC_MODE_CLRTICK_TXBD)) {
                ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
                ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
                ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
                ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
        }

        if (tg3_flag(tp, 5705_PLUS)) {
                ec->rx_coalesce_usecs_irq = 0;
                ec->tx_coalesce_usecs_irq = 0;
                ec->stats_block_coalesce_usecs = 0;
        }
}

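/*
 * Illustrative note (assumption, not part of the original source): the
 * tp->coal seeded here is what "ethtool -c ethX" should report before
 * any user tuning, and "ethtool -C ethX rx-usecs N" would later rewrite
 * it through the driver's set_coalesce hook.  The *_irq and stats
 * fields are zeroed on 5705_PLUS parts, apparently because those chips
 * lack support for these coalescing knobs.
 */
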
static int __devinit tg3_init_one(struct pci_dev *pdev,
                                  const struct pci_device_id *ent)
{
        struct net_device *dev;
        struct tg3 *tp;
        int i, err, pm_cap;
        u32 sndmbx, rcvmbx, intmbx;
        char str[40];
        u64 dma_mask, persist_dma_mask;
        netdev_features_t features = 0;

        printk_once(KERN_INFO "%s\n", version);

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
                return err;
        }

        err = pci_request_regions(pdev, DRV_MODULE_NAME);
        if (err) {
                dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
                goto err_out_disable_pdev;
        }

        pci_set_master(pdev);

        /* Find power-management capability. */
        pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
        if (pm_cap == 0) {
                dev_err(&pdev->dev,
                        "Cannot find Power Management capability, aborting\n");
                err = -EIO;
                goto err_out_free_res;
        }

        err = pci_set_power_state(pdev, PCI_D0);
        if (err) {
                dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
                goto err_out_free_res;
        }

        dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
        if (!dev) {
                err = -ENOMEM;
                goto err_out_power_down;
        }

        SET_NETDEV_DEV(dev, &pdev->dev);

        tp = netdev_priv(dev);
        tp->pdev = pdev;
        tp->dev = dev;
        tp->pm_cap = pm_cap;
        tp->rx_mode = TG3_DEF_RX_MODE;
        tp->tx_mode = TG3_DEF_TX_MODE;

        if (tg3_debug > 0)
                tp->msg_enable = tg3_debug;
        else
                tp->msg_enable = TG3_DEF_MSG_ENABLE;

        /* The word/byte swap controls here control register access byte
         * swapping.  DMA data byte swapping is controlled in the GRC_MODE
         * setting below.
         */
        tp->misc_host_ctrl =
                MISC_HOST_CTRL_MASK_PCI_INT |
                MISC_HOST_CTRL_WORD_SWAP |
                MISC_HOST_CTRL_INDIR_ACCESS |
                MISC_HOST_CTRL_PCISTATE_RW;

        /* The NONFRM (non-frame) byte/word swap controls take effect
         * on descriptor entries, anything which isn't packet data.
         *
         * The StrongARM chips on the board (one for tx, one for rx)
         * are running in big-endian mode.
         */
        tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
                        GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
        tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
        spin_lock_init(&tp->lock);
        spin_lock_init(&tp->indirect_lock);
        INIT_WORK(&tp->reset_task, tg3_reset_task);

        tp->regs = pci_ioremap_bar(pdev, BAR_0);
        if (!tp->regs) {
                dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
                err = -ENOMEM;
                goto err_out_free_dev;
        }

        if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
            tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
                tg3_flag_set(tp, ENABLE_APE);
                tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
                if (!tp->aperegs) {
                        dev_err(&pdev->dev,
                                "Cannot map APE registers, aborting\n");
                        err = -ENOMEM;
                        goto err_out_iounmap;
                }
        }

        tp->rx_pending = TG3_DEF_RX_RING_PENDING;
        tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

        dev->ethtool_ops = &tg3_ethtool_ops;
        dev->watchdog_timeo = TG3_TX_TIMEOUT;
        dev->netdev_ops = &tg3_netdev_ops;
        dev->irq = pdev->irq;

        err = tg3_get_invariants(tp);
        if (err) {
                dev_err(&pdev->dev,
                        "Problem fetching invariants of chip, aborting\n");
                goto err_out_apeunmap;
        }

        /* The EPB bridge inside 5714, 5715, and 5780 and any
         * device behind the EPB cannot support DMA addresses > 40-bit.
         * On 64-bit systems with IOMMU, use 40-bit dma_mask.
         * On 64-bit systems without IOMMU, use 64-bit dma_mask and
         * do DMA address check in tg3_start_xmit().
         */
        if (tg3_flag(tp, IS_5788))
                persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
        else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
                persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
                dma_mask = DMA_BIT_MASK(64);
#endif
        } else
                persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

        /* Configure DMA attributes. */
        if (dma_mask > DMA_BIT_MASK(32)) {
                err = pci_set_dma_mask(pdev, dma_mask);
                if (!err) {
                        features |= NETIF_F_HIGHDMA;
                        err = pci_set_consistent_dma_mask(pdev,
                                                          persist_dma_mask);
                        if (err < 0) {
                                dev_err(&pdev->dev, "Unable to obtain 64 bit "
                                        "DMA for consistent allocations\n");
                                goto err_out_apeunmap;
                        }
                }
        }
        if (err || dma_mask == DMA_BIT_MASK(32)) {
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev,
                                "No usable DMA configuration, aborting\n");
                        goto err_out_apeunmap;
                }
        }

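        /*
         * Illustrative note (assumption, not part of the original source):
         * on a 40BIT_DMA_BUG part with CONFIG_HIGHMEM, the streaming mask
         * is raised to 64 bits while persist_dma_mask stays at 40, so
         * coherent allocations (rings, status block) are kept under the
         * 40-bit limit and the transmit path is left to catch any packet
         * buffers mapped above it, as the comment above describes.
         */
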
        tg3_init_bufmgr_config(tp);

        features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

        /* 5700 B0 chips do not support checksumming correctly due
         * to hardware bugs.
         */
        if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
                features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

                if (tg3_flag(tp, 5755_PLUS))
                        features |= NETIF_F_IPV6_CSUM;
        }

        /* TSO is on by default on chips that support hardware TSO.
         * Firmware TSO on older chips gives lower performance, so it
         * is off by default, but can be enabled using ethtool.
         */
        if ((tg3_flag(tp, HW_TSO_1) ||
             tg3_flag(tp, HW_TSO_2) ||
             tg3_flag(tp, HW_TSO_3)) &&
            (features & NETIF_F_IP_CSUM))
                features |= NETIF_F_TSO;
        if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
                if (features & NETIF_F_IPV6_CSUM)
                        features |= NETIF_F_TSO6;
                if (tg3_flag(tp, HW_TSO_3) ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
                     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
                        features |= NETIF_F_TSO_ECN;
        }

        dev->features |= features;
        dev->vlan_features |= features;

        /*
         * Add loopback capability only for a subset of devices that support
         * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow
         * INT-PHY loopback for the remaining devices.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
            !tg3_flag(tp, CPMU_PRESENT))
                /* Add the loopback capability */
                features |= NETIF_F_LOOPBACK;

        dev->hw_features |= features;

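        /*
         * Illustrative note (assumption, not part of the original source):
         * because NETIF_F_LOOPBACK is added to dev->hw_features but not to
         * dev->features, MAC loopback starts out disabled and is only
         * engaged when user space toggles the feature, e.g. via
         * "ethtool -K ethX loopback on".
         */
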
        if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
            !tg3_flag(tp, TSO_CAPABLE) &&
            !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
                tg3_flag_set(tp, MAX_RXPEND_64);
                tp->rx_pending = 63;
        }

        err = tg3_get_device_address(tp);
        if (err) {
                dev_err(&pdev->dev,
                        "Could not obtain valid ethernet address, aborting\n");
                goto err_out_apeunmap;
        }

        /*
         * Reset chip in case UNDI or EFI driver did not shut it down.
         * Otherwise the DMA self test will enable WDMAC and we'll see
         * (spurious) pending DMA on the PCI bus at that point.
         */
        if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
            (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
                tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        }

        err = tg3_test_dma(tp);
        if (err) {
                dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
                goto err_out_apeunmap;
        }

        intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
        rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
        sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
        for (i = 0; i < tp->irq_max; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tnapi->tp = tp;
                tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

                tnapi->int_mbox = intmbx;
                if (i <= 4)
                        intmbx += 0x8;
                else
                        intmbx += 0x4;

                tnapi->consmbox = rcvmbx;
                tnapi->prodmbox = sndmbx;

                if (i)
                        tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
                else
                        tnapi->coal_now = HOSTCC_MODE_NOW;

                if (!tg3_flag(tp, SUPPORT_MSIX))
                        break;

                /*
                 * If we support MSIX, we'll be using RSS.  If we're using
                 * RSS, the first vector only handles link interrupts and the
                 * remaining vectors handle rx and tx interrupts.  Reuse the
                 * mailbox values for the next iteration.  The values we setup
                 * above are still useful for the single vectored mode.
                 */
                if (!i)
                        continue;

                rcvmbx += 0x8;

                if (sndmbx & 0x4)
                        sndmbx -= 0x4;
                else
                        sndmbx += 0xc;
        }

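        /*
         * Illustrative note (assumption, not part of the original source):
         * with the loop above, vector 0 (link) and vector 1 (first rx/tx
         * ring) share the initial mailbox values, and from vector 2 onward
         * each rx return mailbox advances in 0x8-byte strides.  The
         * -0x4/+0xc zig-zag on sndmbx likewise advances a net 0x8 bytes per
         * ring while alternating between the two 4-byte halves of each
         * 64-bit send mailbox register.
         */
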
        tg3_init_coal(tp);

        pci_set_drvdata(pdev, dev);

        if (tg3_flag(tp, 5717_PLUS)) {
                /* Resume from a low-power mode */
                tg3_frob_aux_power(tp, false);
        }

        err = register_netdev(dev);
        if (err) {
                dev_err(&pdev->dev, "Cannot register net device, aborting\n");
                goto err_out_apeunmap;
        }

        netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
                    tp->board_part_number,
                    tp->pci_chip_rev_id,
                    tg3_bus_string(tp, str),
                    dev->dev_addr);

        if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
                struct phy_device *phydev;
                phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
                netdev_info(dev,
                            "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
                            phydev->drv->name, dev_name(&phydev->dev));
        } else {
                char *ethtype;

                if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
                        ethtype = "10/100Base-TX";
                else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
                        ethtype = "1000Base-SX";
                else
                        ethtype = "10/100/1000Base-T";

                netdev_info(dev, "attached PHY is %s (%s Ethernet) "
                            "(WireSpeed[%d], EEE[%d])\n",
                            tg3_phy_string(tp), ethtype,
                            (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
                            (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
        }

        netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
                    (dev->features & NETIF_F_RXCSUM) != 0,
                    tg3_flag(tp, USE_LINKCHG_REG) != 0,
                    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
                    tg3_flag(tp, ENABLE_ASF) != 0,
                    tg3_flag(tp, TSO_CAPABLE) != 0);
        netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
                    tp->dma_rwctrl,
                    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
                    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

        pci_save_state(pdev);

        return 0;

err_out_apeunmap:
        if (tp->aperegs) {
                iounmap(tp->aperegs);
                tp->aperegs = NULL;
        }

err_out_iounmap:
        if (tp->regs) {
                iounmap(tp->regs);
                tp->regs = NULL;
        }

err_out_free_dev:
        free_netdev(dev);

err_out_power_down:
        pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
        pci_release_regions(pdev);

err_out_disable_pdev:
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        return err;
}

static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);

        if (dev) {
                struct tg3 *tp = netdev_priv(dev);

                if (tp->fw)
                        release_firmware(tp->fw);

                tg3_reset_task_cancel(tp);

                if (tg3_flag(tp, USE_PHYLIB)) {
                        tg3_phy_fini(tp);
                        tg3_mdio_fini(tp);
                }

                unregister_netdev(dev);
                if (tp->aperegs) {
                        iounmap(tp->aperegs);
                        tp->aperegs = NULL;
                }
                if (tp->regs) {
                        iounmap(tp->regs);
                        tp->regs = NULL;
                }
                free_netdev(dev);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
                pci_set_drvdata(pdev, NULL);
        }
}

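/*
 * Illustrative note (assumption, not part of the original source): the
 * teardown above broadly mirrors tg3_init_one() in reverse -- cancel
 * deferred work and PHY/MDIO state, unregister the netdev, then drop the
 * APE and register mappings before releasing the PCI resources -- so no
 * code path can touch an ioremapped region after it is unmapped.
 */
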
#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);
        int err;

        if (!netif_running(dev))
                return 0;

        tg3_reset_task_cancel(tp);
        tg3_phy_stop(tp);
        tg3_netif_stop(tp);

        del_timer_sync(&tp->timer);

        tg3_full_lock(tp, 1);
        tg3_disable_ints(tp);
        tg3_full_unlock(tp);

        netif_device_detach(dev);

        tg3_full_lock(tp, 0);
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tg3_flag_clear(tp, INIT_COMPLETE);
        tg3_full_unlock(tp);

        err = tg3_power_down_prepare(tp);
        if (err) {
                int err2;

                tg3_full_lock(tp, 0);

                tg3_flag_set(tp, INIT_COMPLETE);
                err2 = tg3_restart_hw(tp, 1);
                if (err2)
                        goto out;

                tp->timer.expires = jiffies + tp->timer_offset;
                add_timer(&tp->timer);

                netif_device_attach(dev);
                tg3_netif_start(tp);

out:
                tg3_full_unlock(tp);

                if (!err2)
                        tg3_phy_start(tp);
        }

        return err;
}

static int tg3_resume(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);
        int err;

        if (!netif_running(dev))
                return 0;

        netif_device_attach(dev);

        tg3_full_lock(tp, 0);

        tg3_flag_set(tp, INIT_COMPLETE);
        err = tg3_restart_hw(tp, 1);
        if (err)
                goto out;

        tp->timer.expires = jiffies + tp->timer_offset;
        add_timer(&tp->timer);

        tg3_netif_start(tp);

out:
        tg3_full_unlock(tp);

        if (!err)
                tg3_phy_start(tp);

        return err;
}

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

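/*
 * Illustrative note (assumption, not part of the original source):
 * SIMPLE_DEV_PM_OPS wires tg3_suspend/tg3_resume into the suspend/resume,
 * freeze/thaw and poweroff/restore slots of a dev_pm_ops, so hibernation
 * reuses the same two callbacks.  When CONFIG_PM_SLEEP is off, TG3_PM_OPS
 * is NULL and the driver.pm field below is simply left unset.
 */
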
/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
                                              pci_channel_state_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

        netdev_info(netdev, "PCI I/O error detected\n");

        rtnl_lock();

        if (!netif_running(netdev))
                goto done;

        tg3_phy_stop(tp);

        tg3_netif_stop(tp);

        del_timer_sync(&tp->timer);

        /* Want to make sure that the reset task doesn't run */
        tg3_reset_task_cancel(tp);
        tg3_flag_clear(tp, TX_RECOVERY_PENDING);

        netif_device_detach(netdev);

        /* Clean up software state, even if MMIO is blocked */
        tg3_full_lock(tp, 0);
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
        tg3_full_unlock(tp);

done:
        if (state == pci_channel_io_perm_failure)
                err = PCI_ERS_RESULT_DISCONNECT;
        else
                pci_disable_device(pdev);

        rtnl_unlock();

        return err;
}

/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
        int err;

        rtnl_lock();

        if (pci_enable_device(pdev)) {
                netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
                goto done;
        }

        pci_set_master(pdev);
        pci_restore_state(pdev);
        pci_save_state(pdev);

        if (!netif_running(netdev)) {
                rc = PCI_ERS_RESULT_RECOVERED;
                goto done;
        }

        err = tg3_power_up(tp);
        if (err)
                goto done;

        rc = PCI_ERS_RESULT_RECOVERED;

done:
        rtnl_unlock();

        return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        int err;

        rtnl_lock();

        if (!netif_running(netdev))
                goto done;

        tg3_full_lock(tp, 0);
        tg3_flag_set(tp, INIT_COMPLETE);
        err = tg3_restart_hw(tp, 1);
        tg3_full_unlock(tp);
        if (err) {
                netdev_err(netdev, "Cannot restart hardware after reset.\n");
                goto done;
        }

        netif_device_attach(netdev);

        tp->timer.expires = jiffies + tp->timer_offset;
        add_timer(&tp->timer);

        tg3_netif_start(tp);

        tg3_phy_start(tp);

done:
        rtnl_unlock();
}

static struct pci_error_handlers tg3_err_handler = {
        .error_detected = tg3_io_error_detected,
        .slot_reset     = tg3_io_slot_reset,
        .resume         = tg3_io_resume
};

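/*
 * Illustrative note (assumption, not part of the original source): the
 * PCI error-recovery core drives these hooks in order.  A typical
 * recoverable fault runs tg3_io_error_detected() (which returns
 * NEED_RESET), then, after the link-level reset, tg3_io_slot_reset()
 * restores config space and reports RECOVERED, and finally
 * tg3_io_resume() restarts the hardware and the transmit queues.
 */
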
static struct pci_driver tg3_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = tg3_pci_tbl,
        .probe          = tg3_init_one,
        .remove         = __devexit_p(tg3_remove_one),
        .err_handler    = &tg3_err_handler,
        .driver.pm      = TG3_PM_OPS,
};

static int __init tg3_init(void)
{
        return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
        pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);