/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2012 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)	\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)	\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
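
/* For example, tg3_flag(tp, ENABLE_APE) expands to
 * _tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags), i.e. an atomic
 * test_bit() on the driver's feature-flag bitmap, so every flag test,
 * set, and clear goes through the type-checked helpers above.
 */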

#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		127
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"November 14, 2012"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

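/* Entries below that set .driver_data use these bits to mark reduced
 * capability: TG3_DRV_DATA_FLAG_10_100_ONLY marks 10/100-only (no
 * gigabit) devices, and TG3_DRV_DATA_FLAG_5705_10_100 additionally marks
 * the 10/100 members of the 5705 family (5901, 5901_2, 5705F).
 */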
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)

#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
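
/* The accessors above are reached through per-device function pointers
 * (tp->write32, tp->read32, tp->write32_mbox, ...), which the driver
 * points at the direct, indirect, or flushing variants elsewhere,
 * depending on the chip's bus mode and hardware bugs.  The _f forms
 * flush the write by reading the register back.
 */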
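/* tg3_write_mem()/tg3_read_mem() access NIC on-board SRAM through a
 * sliding window: the target address is loaded into
 * TG3PCI_MEM_WIN_BASE_ADDR and the data moves through
 * TG3PCI_MEM_WIN_DATA, via PCI config space when SRAM_USE_CONFIG is set
 * or via the register map otherwise.
 */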
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}

}

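/* Acquire one of the hardware semaphores shared between the driver and
 * the APE management processor: write the caller's request bit to the
 * lock request register, poll the grant register for up to 1 ms, and
 * revoke the request if the grant never arrives.
 */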
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return 0;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

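/* Poll until the APE has serviced the previously posted driver event,
 * i.e. until APE_EVENT_STATUS_EVENT_PENDING clears; returns nonzero on
 * timeout.  Unlike tg3_ape_event_lock() above, this polls without
 * holding TG3_APE_LOCK_MEM.
 */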
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

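/* Interrupt mailbox convention used below: writing 0x00000001 to a
 * vector's int_mbox (together with MISC_HOST_CTRL_MASK_PCI_INT) masks
 * that vector, while writing the last seen status tag (last_tag << 24)
 * tells the chip how much work has been processed and re-arms the
 * interrupt.
 */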
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

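/* Read one PHY register through the MAC's MI (MDIO) interface: compose a
 * command frame from the PHY address, register number, and a READ
 * opcode, write it to MAC_MI_COM, then poll MI_COM_BUSY (up to
 * PHY_BUSY_LOOPS iterations) until the PHY returns the data.
 */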
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

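/* Indirect (clause 45 style) MMD register access built on the clause 22
 * operations above: the devad/address pair is latched through
 * MII_TG3_MMD_CTRL and MII_TG3_MMD_ADDRESS, then the data itself is
 * transferred through MII_TG3_MMD_ADDRESS in the no-increment data mode.
 */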
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB);

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

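/* Register an mdiobus exposing the single PHY behind this device.  On
 * 5717-plus parts the PHY's MDIO address is derived from the PCI
 * function number (pci_fn + 1, plus another 7 for serdes); all other
 * chips use the fixed TG3_PHY_MII_ADDR.
 */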
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}

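/* tg3_phy_gather_ump_data() snapshots four pairs of MII registers
 * (BMCR/BMSR, ADVERTISE/LPA, CTRL1000/STAT1000, and PHYADDR), packing
 * each pair into one u32 of data[].  tg3_ump_link_report() then hands
 * the snapshot to the management firmware through the NIC_SRAM_FW_CMD
 * mailboxes.
 */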
1605/* tp->lock is held. */
b28f389d 1606static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
95e2869a 1607{
b28f389d 1608 u32 reg, val;
95e2869a
MC
1609
1610 val = 0;
1611 if (!tg3_readphy(tp, MII_BMCR, &reg))
1612 val = reg << 16;
1613 if (!tg3_readphy(tp, MII_BMSR, &reg))
1614 val |= (reg & 0xffff);
b28f389d 1615 *data++ = val;
95e2869a
MC
1616
1617 val = 0;
1618 if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1619 val = reg << 16;
1620 if (!tg3_readphy(tp, MII_LPA, &reg))
1621 val |= (reg & 0xffff);
b28f389d 1622 *data++ = val;
95e2869a
MC
1623
1624 val = 0;
f07e9af3 1625 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
95e2869a
MC
1626 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1627 val = reg << 16;
1628 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1629 val |= (reg & 0xffff);
1630 }
b28f389d 1631 *data++ = val;
95e2869a
MC
1632
1633 if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1634 val = reg << 16;
1635 else
1636 val = 0;
b28f389d
MC
1637 *data++ = val;
1638}
1639
1640/* tp->lock is held. */
1641static void tg3_ump_link_report(struct tg3 *tp)
1642{
1643 u32 data[4];
1644
1645 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1646 return;
1647
1648 tg3_phy_gather_ump_data(tp, data);
1649
1650 tg3_wait_for_event_ack(tp);
1651
1652 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1653 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1654 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1655 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1656 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1657 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
95e2869a 1658
4ba526ce 1659 tg3_generate_fw_event(tp);
95e2869a
MC
1660}
1661
8d5a89b3
MC
1662/* tp->lock is held. */
1663static void tg3_stop_fw(struct tg3 *tp)
1664{
1665 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1666 /* Wait for RX cpu to ACK the previous event. */
1667 tg3_wait_for_event_ack(tp);
1668
1669 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1670
1671 tg3_generate_fw_event(tp);
1672
1673 /* Wait for RX cpu to ACK this event. */
1674 tg3_wait_for_event_ack(tp);
1675 }
1676}
1677
fd6d3f0e
MC
1678/* tp->lock is held. */
1679static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1680{
1681 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1682 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1683
1684 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1685 switch (kind) {
1686 case RESET_KIND_INIT:
1687 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1688 DRV_STATE_START);
1689 break;
1690
1691 case RESET_KIND_SHUTDOWN:
1692 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1693 DRV_STATE_UNLOAD);
1694 break;
1695
1696 case RESET_KIND_SUSPEND:
1697 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1698 DRV_STATE_SUSPEND);
1699 break;
1700
1701 default:
1702 break;
1703 }
1704 }
1705
1706 if (kind == RESET_KIND_INIT ||
1707 kind == RESET_KIND_SUSPEND)
1708 tg3_ape_driver_state_change(tp, kind);
1709}
1710
1711/* tp->lock is held. */
1712static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1713{
1714 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1715 switch (kind) {
1716 case RESET_KIND_INIT:
1717 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1718 DRV_STATE_START_DONE);
1719 break;
1720
1721 case RESET_KIND_SHUTDOWN:
1722 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1723 DRV_STATE_UNLOAD_DONE);
1724 break;
1725
1726 default:
1727 break;
1728 }
1729 }
1730
1731 if (kind == RESET_KIND_SHUTDOWN)
1732 tg3_ape_driver_state_change(tp, kind);
1733}
1734
1735/* tp->lock is held. */
1736static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1737{
1738 if (tg3_flag(tp, ENABLE_ASF)) {
1739 switch (kind) {
1740 case RESET_KIND_INIT:
1741 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1742 DRV_STATE_START);
1743 break;
1744
1745 case RESET_KIND_SHUTDOWN:
1746 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1747 DRV_STATE_UNLOAD);
1748 break;
1749
1750 case RESET_KIND_SUSPEND:
1751 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1752 DRV_STATE_SUSPEND);
1753 break;
1754
1755 default:
1756 break;
1757 }
1758 }
1759}
1760
1761static int tg3_poll_fw(struct tg3 *tp)
1762{
1763 int i;
1764 u32 val;
1765
1766 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1767 /* Wait up to 20ms for init done. */
1768 for (i = 0; i < 200; i++) {
1769 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1770 return 0;
1771 udelay(100);
1772 }
1773 return -ENODEV;
1774 }
1775
 1776 /* Wait up to one second for firmware initialization to complete. */
1777 for (i = 0; i < 100000; i++) {
1778 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1779 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1780 break;
1781 udelay(10);
1782 }
1783
1784 /* Chip might not be fitted with firmware. Some Sun onboard
1785 * parts are configured like that. So don't signal the timeout
1786 * of the above loop as an error, but do report the lack of
1787 * running firmware once.
1788 */
1789 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1790 tg3_flag_set(tp, NO_FWARE_REPORTED);
1791
1792 netdev_info(tp->dev, "No firmware running\n");
1793 }
1794
1795 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
 1796 /* The 57765 A0 needs a little more time to
 1797 * finish its internal initialization.
 1798 */
1799 mdelay(10);
1800 }
1801
1802 return 0;
1803}
1804
95e2869a
MC
1805static void tg3_link_report(struct tg3 *tp)
1806{
1807 if (!netif_carrier_ok(tp->dev)) {
05dbe005 1808 netif_info(tp, link, tp->dev, "Link is down\n");
95e2869a
MC
1809 tg3_ump_link_report(tp);
1810 } else if (netif_msg_link(tp)) {
05dbe005
JP
1811 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1812 (tp->link_config.active_speed == SPEED_1000 ?
1813 1000 :
1814 (tp->link_config.active_speed == SPEED_100 ?
1815 100 : 10)),
1816 (tp->link_config.active_duplex == DUPLEX_FULL ?
1817 "full" : "half"));
1818
1819 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1820 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1821 "on" : "off",
1822 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1823 "on" : "off");
47007831
MC
1824
1825 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1826 netdev_info(tp->dev, "EEE is %s\n",
1827 tp->setlpicnt ? "enabled" : "disabled");
1828
95e2869a
MC
1829 tg3_ump_link_report(tp);
1830 }
1831}
1832
95e2869a
MC
1833static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1834{
1835 u16 miireg;
1836
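 /* Map the requested flow-control mode onto 1000BASE-X pause
 * advertisement bits: TX+RX -> symmetric pause, TX only ->
 * asymmetric pause, RX only -> symmetric plus asymmetric. */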
e18ce346 1837 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
95e2869a 1838 miireg = ADVERTISE_1000XPAUSE;
e18ce346 1839 else if (flow_ctrl & FLOW_CTRL_TX)
95e2869a 1840 miireg = ADVERTISE_1000XPSE_ASYM;
e18ce346 1841 else if (flow_ctrl & FLOW_CTRL_RX)
95e2869a
MC
1842 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1843 else
1844 miireg = 0;
1845
1846 return miireg;
1847}
1848
95e2869a
MC
1849static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1850{
1851 u8 cap = 0;
1852
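 /* Resolve pause ability as in IEEE 802.3 Annex 28B: symmetric
 * pause if both ends advertise it; otherwise the asymmetric
 * bits decide which single direction is enabled. */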
f3791cdf
MC
1853 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1854 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1855 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1856 if (lcladv & ADVERTISE_1000XPAUSE)
1857 cap = FLOW_CTRL_RX;
1858 if (rmtadv & ADVERTISE_1000XPAUSE)
e18ce346 1859 cap = FLOW_CTRL_TX;
95e2869a
MC
1860 }
1861
1862 return cap;
1863}
1864
f51f3562 1865static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
95e2869a 1866{
b02fd9e3 1867 u8 autoneg;
f51f3562 1868 u8 flowctrl = 0;
95e2869a
MC
1869 u32 old_rx_mode = tp->rx_mode;
1870 u32 old_tx_mode = tp->tx_mode;
1871
63c3a66f 1872 if (tg3_flag(tp, USE_PHYLIB))
3f0e3ad7 1873 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
b02fd9e3
MC
1874 else
1875 autoneg = tp->link_config.autoneg;
1876
63c3a66f 1877 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
f07e9af3 1878 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
f51f3562 1879 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
95e2869a 1880 else
bc02ff95 1881 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
f51f3562
MC
1882 } else
1883 flowctrl = tp->link_config.flowctrl;
95e2869a 1884
f51f3562 1885 tp->link_config.active_flowctrl = flowctrl;
95e2869a 1886
e18ce346 1887 if (flowctrl & FLOW_CTRL_RX)
95e2869a
MC
1888 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1889 else
1890 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1891
f51f3562 1892 if (old_rx_mode != tp->rx_mode)
95e2869a 1893 tw32_f(MAC_RX_MODE, tp->rx_mode);
95e2869a 1894
e18ce346 1895 if (flowctrl & FLOW_CTRL_TX)
95e2869a
MC
1896 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1897 else
1898 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1899
f51f3562 1900 if (old_tx_mode != tp->tx_mode)
95e2869a 1901 tw32_f(MAC_TX_MODE, tp->tx_mode);
95e2869a
MC
1902}
1903
b02fd9e3
MC
1904static void tg3_adjust_link(struct net_device *dev)
1905{
1906 u8 oldflowctrl, linkmesg = 0;
1907 u32 mac_mode, lcl_adv, rmt_adv;
1908 struct tg3 *tp = netdev_priv(dev);
3f0e3ad7 1909 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
b02fd9e3 1910
24bb4fb6 1911 spin_lock_bh(&tp->lock);
b02fd9e3
MC
1912
1913 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1914 MAC_MODE_HALF_DUPLEX);
1915
1916 oldflowctrl = tp->link_config.active_flowctrl;
1917
1918 if (phydev->link) {
1919 lcl_adv = 0;
1920 rmt_adv = 0;
1921
1922 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1923 mac_mode |= MAC_MODE_PORT_MODE_MII;
c3df0748
MC
1924 else if (phydev->speed == SPEED_1000 ||
1925 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
b02fd9e3 1926 mac_mode |= MAC_MODE_PORT_MODE_GMII;
c3df0748
MC
1927 else
1928 mac_mode |= MAC_MODE_PORT_MODE_MII;
b02fd9e3
MC
1929
1930 if (phydev->duplex == DUPLEX_HALF)
1931 mac_mode |= MAC_MODE_HALF_DUPLEX;
1932 else {
f88788f0 1933 lcl_adv = mii_advertise_flowctrl(
b02fd9e3
MC
1934 tp->link_config.flowctrl);
1935
1936 if (phydev->pause)
1937 rmt_adv = LPA_PAUSE_CAP;
1938 if (phydev->asym_pause)
1939 rmt_adv |= LPA_PAUSE_ASYM;
1940 }
1941
1942 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1943 } else
1944 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1945
1946 if (mac_mode != tp->mac_mode) {
1947 tp->mac_mode = mac_mode;
1948 tw32_f(MAC_MODE, tp->mac_mode);
1949 udelay(40);
1950 }
1951
fcb389df
MC
1952 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1953 if (phydev->speed == SPEED_10)
1954 tw32(MAC_MI_STAT,
1955 MAC_MI_STAT_10MBPS_MODE |
1956 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1957 else
1958 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1959 }
1960
b02fd9e3
MC
1961 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1962 tw32(MAC_TX_LENGTHS,
1963 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1964 (6 << TX_LENGTHS_IPG_SHIFT) |
1965 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1966 else
1967 tw32(MAC_TX_LENGTHS,
1968 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1969 (6 << TX_LENGTHS_IPG_SHIFT) |
1970 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1971
34655ad6 1972 if (phydev->link != tp->old_link ||
b02fd9e3
MC
1973 phydev->speed != tp->link_config.active_speed ||
1974 phydev->duplex != tp->link_config.active_duplex ||
1975 oldflowctrl != tp->link_config.active_flowctrl)
c6cdf436 1976 linkmesg = 1;
b02fd9e3 1977
34655ad6 1978 tp->old_link = phydev->link;
b02fd9e3
MC
1979 tp->link_config.active_speed = phydev->speed;
1980 tp->link_config.active_duplex = phydev->duplex;
1981
24bb4fb6 1982 spin_unlock_bh(&tp->lock);
b02fd9e3
MC
1983
1984 if (linkmesg)
1985 tg3_link_report(tp);
1986}
1987
1988static int tg3_phy_init(struct tg3 *tp)
1989{
1990 struct phy_device *phydev;
1991
f07e9af3 1992 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
b02fd9e3
MC
1993 return 0;
1994
1995 /* Bring the PHY back to a known state. */
1996 tg3_bmcr_reset(tp);
1997
3f0e3ad7 1998 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
b02fd9e3
MC
1999
2000 /* Attach the MAC to the PHY. */
fb28ad35 2001 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
a9daf367 2002 phydev->dev_flags, phydev->interface);
b02fd9e3 2003 if (IS_ERR(phydev)) {
ab96b241 2004 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
b02fd9e3
MC
2005 return PTR_ERR(phydev);
2006 }
2007
b02fd9e3 2008 /* Mask with MAC supported features. */
9c61d6bc
MC
2009 switch (phydev->interface) {
2010 case PHY_INTERFACE_MODE_GMII:
2011 case PHY_INTERFACE_MODE_RGMII:
f07e9af3 2012 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
321d32a0
MC
2013 phydev->supported &= (PHY_GBIT_FEATURES |
2014 SUPPORTED_Pause |
2015 SUPPORTED_Asym_Pause);
2016 break;
2017 }
2018 /* fallthru */
9c61d6bc
MC
2019 case PHY_INTERFACE_MODE_MII:
2020 phydev->supported &= (PHY_BASIC_FEATURES |
2021 SUPPORTED_Pause |
2022 SUPPORTED_Asym_Pause);
2023 break;
2024 default:
3f0e3ad7 2025 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
9c61d6bc
MC
2026 return -EINVAL;
2027 }
2028
f07e9af3 2029 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
b02fd9e3
MC
2030
2031 phydev->advertising = phydev->supported;
2032
b02fd9e3
MC
2033 return 0;
2034}
2035
2036static void tg3_phy_start(struct tg3 *tp)
2037{
2038 struct phy_device *phydev;
2039
f07e9af3 2040 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
b02fd9e3
MC
2041 return;
2042
3f0e3ad7 2043 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
b02fd9e3 2044
80096068
MC
2045 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2046 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
c6700ce2
MC
2047 phydev->speed = tp->link_config.speed;
2048 phydev->duplex = tp->link_config.duplex;
2049 phydev->autoneg = tp->link_config.autoneg;
2050 phydev->advertising = tp->link_config.advertising;
b02fd9e3
MC
2051 }
2052
2053 phy_start(phydev);
2054
2055 phy_start_aneg(phydev);
2056}
2057
2058static void tg3_phy_stop(struct tg3 *tp)
2059{
f07e9af3 2060 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
b02fd9e3
MC
2061 return;
2062
3f0e3ad7 2063 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
b02fd9e3
MC
2064}
2065
2066static void tg3_phy_fini(struct tg3 *tp)
2067{
f07e9af3 2068 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
3f0e3ad7 2069 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
f07e9af3 2070 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
b02fd9e3
MC
2071 }
2072}
2073
941ec90f
MC
2074static int tg3_phy_set_extloopbk(struct tg3 *tp)
2075{
2076 int err;
2077 u32 val;
2078
2079 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2080 return 0;
2081
2082 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2083 /* Cannot do read-modify-write on 5401 */
2084 err = tg3_phy_auxctl_write(tp,
2085 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2086 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2087 0x4c20);
2088 goto done;
2089 }
2090
2091 err = tg3_phy_auxctl_read(tp,
2092 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2093 if (err)
2094 return err;
2095
2096 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2097 err = tg3_phy_auxctl_write(tp,
2098 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2099
2100done:
2101 return err;
2102}
2103
7f97a4bd
MC
2104static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2105{
2106 u32 phytest;
2107
2108 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2109 u32 phy;
2110
2111 tg3_writephy(tp, MII_TG3_FET_TEST,
2112 phytest | MII_TG3_FET_SHADOW_EN);
2113 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2114 if (enable)
2115 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2116 else
2117 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2118 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2119 }
2120 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2121 }
2122}
2123
6833c043
MC
2124static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2125{
2126 u32 reg;
2127
63c3a66f
JP
2128 if (!tg3_flag(tp, 5705_PLUS) ||
2129 (tg3_flag(tp, 5717_PLUS) &&
f07e9af3 2130 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
6833c043
MC
2131 return;
2132
f07e9af3 2133 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7f97a4bd
MC
2134 tg3_phy_fet_toggle_apd(tp, enable);
2135 return;
2136 }
2137
6833c043
MC
2138 reg = MII_TG3_MISC_SHDW_WREN |
2139 MII_TG3_MISC_SHDW_SCR5_SEL |
2140 MII_TG3_MISC_SHDW_SCR5_LPED |
2141 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2142 MII_TG3_MISC_SHDW_SCR5_SDTL |
2143 MII_TG3_MISC_SHDW_SCR5_C125OE;
2144 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
2145 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2146
2147 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2148
2149
2150 reg = MII_TG3_MISC_SHDW_WREN |
2151 MII_TG3_MISC_SHDW_APD_SEL |
2152 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2153 if (enable)
2154 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2155
2156 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2157}
2158
9ef8ca99
MC
2159static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2160{
2161 u32 phy;
2162
63c3a66f 2163 if (!tg3_flag(tp, 5705_PLUS) ||
f07e9af3 2164 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
9ef8ca99
MC
2165 return;
2166
f07e9af3 2167 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
9ef8ca99
MC
2168 u32 ephy;
2169
535ef6e1
MC
2170 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2171 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2172
2173 tg3_writephy(tp, MII_TG3_FET_TEST,
2174 ephy | MII_TG3_FET_SHADOW_EN);
2175 if (!tg3_readphy(tp, reg, &phy)) {
9ef8ca99 2176 if (enable)
535ef6e1 2177 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
9ef8ca99 2178 else
535ef6e1
MC
2179 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2180 tg3_writephy(tp, reg, phy);
9ef8ca99 2181 }
535ef6e1 2182 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
9ef8ca99
MC
2183 }
2184 } else {
15ee95c3
MC
2185 int ret;
2186
2187 ret = tg3_phy_auxctl_read(tp,
2188 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2189 if (!ret) {
9ef8ca99
MC
2190 if (enable)
2191 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2192 else
2193 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
b4bd2929
MC
2194 tg3_phy_auxctl_write(tp,
2195 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
9ef8ca99
MC
2196 }
2197 }
2198}
2199
1da177e4
LT
2200static void tg3_phy_set_wirespeed(struct tg3 *tp)
2201{
15ee95c3 2202 int ret;
1da177e4
LT
2203 u32 val;
2204
f07e9af3 2205 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
1da177e4
LT
2206 return;
2207
15ee95c3
MC
2208 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2209 if (!ret)
b4bd2929
MC
2210 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2211 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
1da177e4
LT
2212}
2213
b2a5c19c
MC
2214static void tg3_phy_apply_otp(struct tg3 *tp)
2215{
2216 u32 otp, phy;
2217
2218 if (!tp->phy_otp)
2219 return;
2220
2221 otp = tp->phy_otp;
2222
1d36ba45
MC
2223 if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2224 return;
b2a5c19c
MC
2225
2226 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2227 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2228 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2229
2230 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2231 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2232 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2233
2234 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2235 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2236 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2237
2238 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2239 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2240
2241 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2242 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2243
2244 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2245 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2246 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2247
1d36ba45 2248 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
b2a5c19c
MC
2249}
2250
52b02d04
MC
2251static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2252{
2253 u32 val;
2254
2255 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2256 return;
2257
2258 tp->setlpicnt = 0;
2259
2260 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2261 current_link_up == 1 &&
a6b68dab
MC
2262 tp->link_config.active_duplex == DUPLEX_FULL &&
2263 (tp->link_config.active_speed == SPEED_100 ||
2264 tp->link_config.active_speed == SPEED_1000)) {
52b02d04
MC
2265 u32 eeectl;
2266
2267 if (tp->link_config.active_speed == SPEED_1000)
2268 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2269 else
2270 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2271
2272 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2273
3110f5f5
MC
2274 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2275 TG3_CL45_D7_EEERES_STAT, &val);
52b02d04 2276
b0c5943f
MC
2277 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2278 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
52b02d04
MC
2279 tp->setlpicnt = 2;
2280 }
2281
2282 if (!tp->setlpicnt) {
b715ce94
MC
2283 if (current_link_up == 1 &&
2284 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2285 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2286 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2287 }
2288
52b02d04
MC
2289 val = tr32(TG3_CPMU_EEE_MODE);
2290 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2291 }
2292}
2293
b0c5943f
MC
2294static void tg3_phy_eee_enable(struct tg3 *tp)
2295{
2296 u32 val;
2297
2298 if (tp->link_config.active_speed == SPEED_1000 &&
2299 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2300 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
55086ad9 2301 tg3_flag(tp, 57765_CLASS)) &&
b0c5943f 2302 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
b715ce94
MC
2303 val = MII_TG3_DSP_TAP26_ALNOKO |
2304 MII_TG3_DSP_TAP26_RMRXSTO;
2305 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
b0c5943f
MC
2306 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2307 }
2308
2309 val = tr32(TG3_CPMU_EEE_MODE);
2310 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2311}
2312
1da177e4
LT
2313static int tg3_wait_macro_done(struct tg3 *tp)
2314{
2315 int limit = 100;
2316
2317 while (limit--) {
2318 u32 tmp32;
2319
f08aa1a8 2320 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1da177e4
LT
2321 if ((tmp32 & 0x1000) == 0)
2322 break;
2323 }
2324 }
d4675b52 2325 if (limit < 0)
1da177e4
LT
2326 return -EBUSY;
2327
2328 return 0;
2329}
2330
2331static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2332{
2333 static const u32 test_pat[4][6] = {
2334 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2335 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2336 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2337 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2338 };
2339 int chan;
2340
2341 for (chan = 0; chan < 4; chan++) {
2342 int i;
2343
2344 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2345 (chan * 0x2000) | 0x0200);
f08aa1a8 2346 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1da177e4
LT
2347
2348 for (i = 0; i < 6; i++)
2349 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2350 test_pat[chan][i]);
2351
f08aa1a8 2352 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1da177e4
LT
2353 if (tg3_wait_macro_done(tp)) {
2354 *resetp = 1;
2355 return -EBUSY;
2356 }
2357
2358 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2359 (chan * 0x2000) | 0x0200);
f08aa1a8 2360 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
1da177e4
LT
2361 if (tg3_wait_macro_done(tp)) {
2362 *resetp = 1;
2363 return -EBUSY;
2364 }
2365
f08aa1a8 2366 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
1da177e4
LT
2367 if (tg3_wait_macro_done(tp)) {
2368 *resetp = 1;
2369 return -EBUSY;
2370 }
2371
2372 for (i = 0; i < 6; i += 2) {
2373 u32 low, high;
2374
2375 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2376 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2377 tg3_wait_macro_done(tp)) {
2378 *resetp = 1;
2379 return -EBUSY;
2380 }
2381 low &= 0x7fff;
2382 high &= 0x000f;
2383 if (low != test_pat[chan][i] ||
2384 high != test_pat[chan][i+1]) {
2385 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2386 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2387 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2388
2389 return -EBUSY;
2390 }
2391 }
2392 }
2393
2394 return 0;
2395}
2396
2397static int tg3_phy_reset_chanpat(struct tg3 *tp)
2398{
2399 int chan;
2400
2401 for (chan = 0; chan < 4; chan++) {
2402 int i;
2403
2404 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2405 (chan * 0x2000) | 0x0200);
f08aa1a8 2406 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1da177e4
LT
2407 for (i = 0; i < 6; i++)
2408 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
f08aa1a8 2409 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1da177e4
LT
2410 if (tg3_wait_macro_done(tp))
2411 return -EBUSY;
2412 }
2413
2414 return 0;
2415}
2416
2417static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2418{
2419 u32 reg32, phy9_orig;
2420 int retries, do_phy_reset, err;
2421
2422 retries = 10;
2423 do_phy_reset = 1;
2424 do {
2425 if (do_phy_reset) {
2426 err = tg3_bmcr_reset(tp);
2427 if (err)
2428 return err;
2429 do_phy_reset = 0;
2430 }
2431
2432 /* Disable transmitter and interrupt. */
2433 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2434 continue;
2435
2436 reg32 |= 0x3000;
2437 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2438
 2439 /* Set full-duplex, 1000 Mbps. */
2440 tg3_writephy(tp, MII_BMCR,
221c5637 2441 BMCR_FULLDPLX | BMCR_SPEED1000);
1da177e4
LT
2442
2443 /* Set to master mode. */
221c5637 2444 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
1da177e4
LT
2445 continue;
2446
221c5637
MC
2447 tg3_writephy(tp, MII_CTRL1000,
2448 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
1da177e4 2449
1d36ba45
MC
2450 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2451 if (err)
2452 return err;
1da177e4
LT
2453
2454 /* Block the PHY control access. */
6ee7c0a0 2455 tg3_phydsp_write(tp, 0x8005, 0x0800);
1da177e4
LT
2456
2457 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2458 if (!err)
2459 break;
2460 } while (--retries);
2461
2462 err = tg3_phy_reset_chanpat(tp);
2463 if (err)
2464 return err;
2465
6ee7c0a0 2466 tg3_phydsp_write(tp, 0x8005, 0x0000);
1da177e4
LT
2467
2468 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
f08aa1a8 2469 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
1da177e4 2470
1d36ba45 2471 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1da177e4 2472
221c5637 2473 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
1da177e4
LT
2474
2475 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2476 reg32 &= ~0x3000;
2477 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2478 } else if (!err)
2479 err = -EBUSY;
2480
2481 return err;
2482}
2483
f4a46d1f
NNS
2484static void tg3_carrier_on(struct tg3 *tp)
2485{
2486 netif_carrier_on(tp->dev);
2487 tp->link_up = true;
2488}
2489
2490static void tg3_carrier_off(struct tg3 *tp)
2491{
2492 netif_carrier_off(tp->dev);
2493 tp->link_up = false;
2494}
2495
1da177e4
LT
2496/* Unconditionally reset the tigon3 PHY and reapply the
2497 * chip-specific workarounds.
2498 */
2499static int tg3_phy_reset(struct tg3 *tp)
2500{
f833c4c1 2501 u32 val, cpmuctrl;
1da177e4
LT
2502 int err;
2503
60189ddf 2504 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
60189ddf
MC
2505 val = tr32(GRC_MISC_CFG);
2506 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2507 udelay(40);
2508 }
f833c4c1
MC
2509 err = tg3_readphy(tp, MII_BMSR, &val);
2510 err |= tg3_readphy(tp, MII_BMSR, &val);
1da177e4
LT
2511 if (err != 0)
2512 return -EBUSY;
2513
f4a46d1f
NNS
2514 if (netif_running(tp->dev) && tp->link_up) {
2515 tg3_carrier_off(tp);
c8e1e82b
MC
2516 tg3_link_report(tp);
2517 }
2518
1da177e4
LT
2519 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2520 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2521 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2522 err = tg3_phy_reset_5703_4_5(tp);
2523 if (err)
2524 return err;
2525 goto out;
2526 }
2527
b2a5c19c
MC
2528 cpmuctrl = 0;
2529 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2530 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2531 cpmuctrl = tr32(TG3_CPMU_CTRL);
2532 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2533 tw32(TG3_CPMU_CTRL,
2534 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2535 }
2536
1da177e4
LT
2537 err = tg3_bmcr_reset(tp);
2538 if (err)
2539 return err;
2540
b2a5c19c 2541 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
f833c4c1
MC
2542 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2543 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
b2a5c19c
MC
2544
2545 tw32(TG3_CPMU_CTRL, cpmuctrl);
2546 }
2547
bcb37f6c
MC
2548 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2549 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
ce057f01
MC
2550 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2551 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2552 CPMU_LSPD_1000MB_MACCLK_12_5) {
2553 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2554 udelay(40);
2555 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2556 }
2557 }
2558
63c3a66f 2559 if (tg3_flag(tp, 5717_PLUS) &&
f07e9af3 2560 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
ecf1410b
MC
2561 return 0;
2562
b2a5c19c
MC
2563 tg3_phy_apply_otp(tp);
2564
f07e9af3 2565 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
6833c043
MC
2566 tg3_phy_toggle_apd(tp, true);
2567 else
2568 tg3_phy_toggle_apd(tp, false);
2569
1da177e4 2570out:
1d36ba45
MC
2571 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2572 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
6ee7c0a0
MC
2573 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2574 tg3_phydsp_write(tp, 0x000a, 0x0323);
1d36ba45 2575 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1da177e4 2576 }
1d36ba45 2577
f07e9af3 2578 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
f08aa1a8
MC
2579 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2580 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
1da177e4 2581 }
1d36ba45 2582
f07e9af3 2583 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
1d36ba45
MC
2584 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2585 tg3_phydsp_write(tp, 0x000a, 0x310b);
2586 tg3_phydsp_write(tp, 0x201f, 0x9506);
2587 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2588 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2589 }
f07e9af3 2590 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
1d36ba45
MC
2591 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2592 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2593 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2594 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2595 tg3_writephy(tp, MII_TG3_TEST1,
2596 MII_TG3_TEST1_TRIM_EN | 0x4);
2597 } else
2598 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2599
2600 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2601 }
c424cb24 2602 }
1d36ba45 2603
1da177e4
LT
 2604 /* Set Extended packet length bit (bit 14) on all
 2605 * chips that support jumbo frames. */
79eb6904 2606 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1da177e4 2607 /* Cannot do read-modify-write on 5401 */
b4bd2929 2608 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
63c3a66f 2609 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
1da177e4 2610 /* Set bit 14 with read-modify-write to preserve other bits */
15ee95c3
MC
2611 err = tg3_phy_auxctl_read(tp,
2612 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2613 if (!err)
b4bd2929
MC
2614 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2615 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
1da177e4
LT
2616 }
2617
2618 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2619 * jumbo frames transmission.
2620 */
63c3a66f 2621 if (tg3_flag(tp, JUMBO_CAPABLE)) {
f833c4c1 2622 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
c6cdf436 2623 tg3_writephy(tp, MII_TG3_EXT_CTRL,
f833c4c1 2624 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1da177e4
LT
2625 }
2626
715116a1 2627 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
715116a1 2628 /* adjust output voltage */
535ef6e1 2629 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
715116a1
MC
2630 }
2631
9ef8ca99 2632 tg3_phy_toggle_automdix(tp, 1);
1da177e4
LT
2633 tg3_phy_set_wirespeed(tp);
2634 return 0;
2635}
2636
3a1e19d3
MC
2637#define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2638#define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2639#define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2640 TG3_GPIO_MSG_NEED_VAUX)
2641#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2642 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2643 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2644 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2645 (TG3_GPIO_MSG_DRVR_PRES << 12))
2646
2647#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2648 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2649 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2650 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2651 (TG3_GPIO_MSG_NEED_VAUX << 12))
2652
2653static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2654{
2655 u32 status, shift;
2656
2657 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2658 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2659 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2660 else
2661 status = tr32(TG3_CPMU_DRV_STATUS);
2662
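 /* Each PCI function owns a 4-bit slot in the status word, so
 * function n's GPIO messages sit 4 * n bits above the base
 * shift. */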
2663 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2664 status &= ~(TG3_GPIO_MSG_MASK << shift);
2665 status |= (newstat << shift);
2666
2667 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2668 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2669 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2670 else
2671 tw32(TG3_CPMU_DRV_STATUS, status);
2672
2673 return status >> TG3_APE_GPIO_MSG_SHIFT;
2674}
2675
520b2756
MC
2676static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2677{
2678 if (!tg3_flag(tp, IS_NIC))
2679 return 0;
2680
3a1e19d3
MC
2681 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2682 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2683 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2684 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2685 return -EIO;
520b2756 2686
3a1e19d3
MC
2687 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2688
2689 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2690 TG3_GRC_LCLCTL_PWRSW_DELAY);
2691
2692 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2693 } else {
2694 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2695 TG3_GRC_LCLCTL_PWRSW_DELAY);
2696 }
6f5c8f83 2697
520b2756
MC
2698 return 0;
2699}
2700
2701static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2702{
2703 u32 grc_local_ctrl;
2704
2705 if (!tg3_flag(tp, IS_NIC) ||
2706 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2707 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2708 return;
2709
2710 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2711
2712 tw32_wait_f(GRC_LOCAL_CTRL,
2713 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2714 TG3_GRC_LCLCTL_PWRSW_DELAY);
2715
2716 tw32_wait_f(GRC_LOCAL_CTRL,
2717 grc_local_ctrl,
2718 TG3_GRC_LCLCTL_PWRSW_DELAY);
2719
2720 tw32_wait_f(GRC_LOCAL_CTRL,
2721 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2722 TG3_GRC_LCLCTL_PWRSW_DELAY);
2723}
2724
2725static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2726{
2727 if (!tg3_flag(tp, IS_NIC))
2728 return;
2729
2730 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2731 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2732 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2733 (GRC_LCLCTRL_GPIO_OE0 |
2734 GRC_LCLCTRL_GPIO_OE1 |
2735 GRC_LCLCTRL_GPIO_OE2 |
2736 GRC_LCLCTRL_GPIO_OUTPUT0 |
2737 GRC_LCLCTRL_GPIO_OUTPUT1),
2738 TG3_GRC_LCLCTL_PWRSW_DELAY);
2739 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2740 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2741 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2742 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2743 GRC_LCLCTRL_GPIO_OE1 |
2744 GRC_LCLCTRL_GPIO_OE2 |
2745 GRC_LCLCTRL_GPIO_OUTPUT0 |
2746 GRC_LCLCTRL_GPIO_OUTPUT1 |
2747 tp->grc_local_ctrl;
2748 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2749 TG3_GRC_LCLCTL_PWRSW_DELAY);
2750
2751 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2752 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2753 TG3_GRC_LCLCTL_PWRSW_DELAY);
2754
2755 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2756 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2757 TG3_GRC_LCLCTL_PWRSW_DELAY);
2758 } else {
2759 u32 no_gpio2;
2760 u32 grc_local_ctrl = 0;
2761
 2762 /* Workaround to prevent the part from drawing too much current. */
2763 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2764 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2765 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2766 grc_local_ctrl,
2767 TG3_GRC_LCLCTL_PWRSW_DELAY);
2768 }
2769
2770 /* On 5753 and variants, GPIO2 cannot be used. */
2771 no_gpio2 = tp->nic_sram_data_cfg &
2772 NIC_SRAM_DATA_CFG_NO_GPIO2;
2773
2774 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2775 GRC_LCLCTRL_GPIO_OE1 |
2776 GRC_LCLCTRL_GPIO_OE2 |
2777 GRC_LCLCTRL_GPIO_OUTPUT1 |
2778 GRC_LCLCTRL_GPIO_OUTPUT2;
2779 if (no_gpio2) {
2780 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2781 GRC_LCLCTRL_GPIO_OUTPUT2);
2782 }
2783 tw32_wait_f(GRC_LOCAL_CTRL,
2784 tp->grc_local_ctrl | grc_local_ctrl,
2785 TG3_GRC_LCLCTL_PWRSW_DELAY);
2786
2787 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2788
2789 tw32_wait_f(GRC_LOCAL_CTRL,
2790 tp->grc_local_ctrl | grc_local_ctrl,
2791 TG3_GRC_LCLCTL_PWRSW_DELAY);
2792
2793 if (!no_gpio2) {
2794 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2795 tw32_wait_f(GRC_LOCAL_CTRL,
2796 tp->grc_local_ctrl | grc_local_ctrl,
2797 TG3_GRC_LCLCTL_PWRSW_DELAY);
2798 }
2799 }
3a1e19d3
MC
2800}
2801
cd0d7228 2802static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
3a1e19d3
MC
2803{
2804 u32 msg = 0;
2805
2806 /* Serialize power state transitions */
2807 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2808 return;
2809
cd0d7228 2810 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
3a1e19d3
MC
2811 msg = TG3_GPIO_MSG_NEED_VAUX;
2812
2813 msg = tg3_set_function_status(tp, msg);
2814
2815 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2816 goto done;
6f5c8f83 2817
3a1e19d3
MC
2818 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2819 tg3_pwrsrc_switch_to_vaux(tp);
2820 else
2821 tg3_pwrsrc_die_with_vmain(tp);
2822
2823done:
6f5c8f83 2824 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
520b2756
MC
2825}
2826
cd0d7228 2827static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
1da177e4 2828{
683644b7 2829 bool need_vaux = false;
1da177e4 2830
334355aa 2831 /* The GPIOs do something completely different on 57765. */
55086ad9 2832 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
1da177e4
LT
2833 return;
2834
3a1e19d3
MC
2835 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2836 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2837 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
cd0d7228
MC
2838 tg3_frob_aux_power_5717(tp, include_wol ?
2839 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
3a1e19d3
MC
2840 return;
2841 }
2842
2843 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
8c2dc7e1
MC
2844 struct net_device *dev_peer;
2845
2846 dev_peer = pci_get_drvdata(tp->pdev_peer);
683644b7 2847
bc1c7567 2848 /* remove_one() may have been run on the peer. */
683644b7
MC
2849 if (dev_peer) {
2850 struct tg3 *tp_peer = netdev_priv(dev_peer);
2851
63c3a66f 2852 if (tg3_flag(tp_peer, INIT_COMPLETE))
683644b7
MC
2853 return;
2854
cd0d7228 2855 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
63c3a66f 2856 tg3_flag(tp_peer, ENABLE_ASF))
683644b7
MC
2857 need_vaux = true;
2858 }
1da177e4
LT
2859 }
2860
cd0d7228
MC
2861 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2862 tg3_flag(tp, ENABLE_ASF))
683644b7
MC
2863 need_vaux = true;
2864
520b2756
MC
2865 if (need_vaux)
2866 tg3_pwrsrc_switch_to_vaux(tp);
2867 else
2868 tg3_pwrsrc_die_with_vmain(tp);
1da177e4
LT
2869}
2870
e8f3f6ca
MC
2871static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2872{
2873 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2874 return 1;
79eb6904 2875 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
e8f3f6ca
MC
2876 if (speed != SPEED_10)
2877 return 1;
2878 } else if (speed == SPEED_10)
2879 return 1;
2880
2881 return 0;
2882}
2883
0a459aac 2884static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
15c3b696 2885{
ce057f01
MC
2886 u32 val;
2887
f07e9af3 2888 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
5129724a
MC
2889 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2890 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2891 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2892
2893 sg_dig_ctrl |=
2894 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2895 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2896 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2897 }
3f7045c1 2898 return;
5129724a 2899 }
3f7045c1 2900
60189ddf 2901 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
60189ddf
MC
2902 tg3_bmcr_reset(tp);
2903 val = tr32(GRC_MISC_CFG);
2904 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2905 udelay(40);
2906 return;
f07e9af3 2907 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
0e5f784c
MC
2908 u32 phytest;
2909 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2910 u32 phy;
2911
2912 tg3_writephy(tp, MII_ADVERTISE, 0);
2913 tg3_writephy(tp, MII_BMCR,
2914 BMCR_ANENABLE | BMCR_ANRESTART);
2915
2916 tg3_writephy(tp, MII_TG3_FET_TEST,
2917 phytest | MII_TG3_FET_SHADOW_EN);
2918 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2919 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2920 tg3_writephy(tp,
2921 MII_TG3_FET_SHDW_AUXMODE4,
2922 phy);
2923 }
2924 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2925 }
2926 return;
0a459aac 2927 } else if (do_low_power) {
715116a1
MC
2928 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2929 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
0a459aac 2930
b4bd2929
MC
2931 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2932 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2933 MII_TG3_AUXCTL_PCTL_VREG_11V;
2934 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
715116a1 2935 }
3f7045c1 2936
15c3b696
MC
2937 /* The PHY should not be powered down on some chips because
2938 * of bugs.
2939 */
2940 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2941 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2942 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
085f1afc
MC
2943 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2944 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
2945 !tp->pci_fn))
15c3b696 2946 return;
ce057f01 2947
bcb37f6c
MC
2948 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2949 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
ce057f01
MC
2950 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2951 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2952 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2953 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2954 }
2955
15c3b696
MC
2956 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2957}
2958
ffbcfed4
MC
2959/* tp->lock is held. */
2960static int tg3_nvram_lock(struct tg3 *tp)
2961{
63c3a66f 2962 if (tg3_flag(tp, NVRAM)) {
ffbcfed4
MC
2963 int i;
2964
2965 if (tp->nvram_lock_cnt == 0) {
2966 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2967 for (i = 0; i < 8000; i++) {
2968 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2969 break;
2970 udelay(20);
2971 }
2972 if (i == 8000) {
2973 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2974 return -ENODEV;
2975 }
2976 }
2977 tp->nvram_lock_cnt++;
2978 }
2979 return 0;
2980}
2981
2982/* tp->lock is held. */
2983static void tg3_nvram_unlock(struct tg3 *tp)
2984{
63c3a66f 2985 if (tg3_flag(tp, NVRAM)) {
ffbcfed4
MC
2986 if (tp->nvram_lock_cnt > 0)
2987 tp->nvram_lock_cnt--;
2988 if (tp->nvram_lock_cnt == 0)
2989 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2990 }
2991}
2992
2993/* tp->lock is held. */
2994static void tg3_enable_nvram_access(struct tg3 *tp)
2995{
63c3a66f 2996 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
ffbcfed4
MC
2997 u32 nvaccess = tr32(NVRAM_ACCESS);
2998
2999 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3000 }
3001}
3002
3003/* tp->lock is held. */
3004static void tg3_disable_nvram_access(struct tg3 *tp)
3005{
63c3a66f 3006 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
ffbcfed4
MC
3007 u32 nvaccess = tr32(NVRAM_ACCESS);
3008
3009 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3010 }
3011}
3012
3013static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3014 u32 offset, u32 *val)
3015{
3016 u32 tmp;
3017 int i;
3018
3019 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3020 return -EINVAL;
3021
3022 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3023 EEPROM_ADDR_DEVID_MASK |
3024 EEPROM_ADDR_READ);
3025 tw32(GRC_EEPROM_ADDR,
3026 tmp |
3027 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3028 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3029 EEPROM_ADDR_ADDR_MASK) |
3030 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3031
3032 for (i = 0; i < 1000; i++) {
3033 tmp = tr32(GRC_EEPROM_ADDR);
3034
3035 if (tmp & EEPROM_ADDR_COMPLETE)
3036 break;
3037 msleep(1);
3038 }
3039 if (!(tmp & EEPROM_ADDR_COMPLETE))
3040 return -EBUSY;
3041
62cedd11
MC
3042 tmp = tr32(GRC_EEPROM_DATA);
3043
3044 /*
3045 * The data will always be opposite the native endian
3046 * format. Perform a blind byteswap to compensate.
3047 */
3048 *val = swab32(tmp);
3049
ffbcfed4
MC
3050 return 0;
3051}
3052
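/* tg3_nvram_exec_cmd() polls NVRAM_CMD_DONE every 10 usec, so this
 * bounds a command at roughly 100 ms.
 */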
3053#define NVRAM_CMD_TIMEOUT 10000
3054
3055static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3056{
3057 int i;
3058
3059 tw32(NVRAM_CMD, nvram_cmd);
3060 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3061 udelay(10);
3062 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3063 udelay(10);
3064 break;
3065 }
3066 }
3067
3068 if (i == NVRAM_CMD_TIMEOUT)
3069 return -EBUSY;
3070
3071 return 0;
3072}
3073
3074static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3075{
63c3a66f
JP
3076 if (tg3_flag(tp, NVRAM) &&
3077 tg3_flag(tp, NVRAM_BUFFERED) &&
3078 tg3_flag(tp, FLASH) &&
3079 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
ffbcfed4
MC
3080 (tp->nvram_jedecnum == JEDEC_ATMEL))
3081
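 /* Buffered Atmel AT45DB0X1B parts are page addressed: the page
 * number lives above ATMEL_AT45DB0X1B_PAGE_POS and the byte
 * offset within the page below it. */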
3082 addr = ((addr / tp->nvram_pagesize) <<
3083 ATMEL_AT45DB0X1B_PAGE_POS) +
3084 (addr % tp->nvram_pagesize);
3085
3086 return addr;
3087}
3088
3089static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3090{
63c3a66f
JP
3091 if (tg3_flag(tp, NVRAM) &&
3092 tg3_flag(tp, NVRAM_BUFFERED) &&
3093 tg3_flag(tp, FLASH) &&
3094 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
ffbcfed4
MC
3095 (tp->nvram_jedecnum == JEDEC_ATMEL))
3096
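 /* Inverse of tg3_nvram_phys_addr(): fold a page/offset pair
 * back into a linear byte address. */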
3097 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3098 tp->nvram_pagesize) +
3099 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3100
3101 return addr;
3102}
3103
e4f34110
MC
3104/* NOTE: Data read in from NVRAM is byteswapped according to
3105 * the byteswapping settings for all other register accesses.
3106 * tg3 devices are BE devices, so on a BE machine, the data
3107 * returned will be exactly as it is seen in NVRAM. On a LE
3108 * machine, the 32-bit value will be byteswapped.
3109 */
ffbcfed4
MC
3110static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3111{
3112 int ret;
3113
63c3a66f 3114 if (!tg3_flag(tp, NVRAM))
ffbcfed4
MC
3115 return tg3_nvram_read_using_eeprom(tp, offset, val);
3116
3117 offset = tg3_nvram_phys_addr(tp, offset);
3118
3119 if (offset > NVRAM_ADDR_MSK)
3120 return -EINVAL;
3121
3122 ret = tg3_nvram_lock(tp);
3123 if (ret)
3124 return ret;
3125
3126 tg3_enable_nvram_access(tp);
3127
3128 tw32(NVRAM_ADDR, offset);
3129 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3130 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3131
3132 if (ret == 0)
e4f34110 3133 *val = tr32(NVRAM_RDDATA);
ffbcfed4
MC
3134
3135 tg3_disable_nvram_access(tp);
3136
3137 tg3_nvram_unlock(tp);
3138
3139 return ret;
3140}
3141
a9dc529d
MC
3142/* Ensures NVRAM data is in bytestream format. */
3143static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
ffbcfed4
MC
3144{
3145 u32 v;
a9dc529d 3146 int res = tg3_nvram_read(tp, offset, &v);
ffbcfed4 3147 if (!res)
a9dc529d 3148 *val = cpu_to_be32(v);
ffbcfed4
MC
3149 return res;
3150}
3151
dbe9b92a
MC
3152static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3153 u32 offset, u32 len, u8 *buf)
3154{
3155 int i, j, rc = 0;
3156 u32 val;
3157
3158 for (i = 0; i < len; i += 4) {
3159 u32 addr;
3160 __be32 data;
3161
3162 addr = offset + i;
3163
3164 memcpy(&data, buf + i, 4);
3165
3166 /*
3167 * The SEEPROM interface expects the data to always be opposite
3168 * the native endian format. We accomplish this by reversing
3169 * all the operations that would have been performed on the
3170 * data from a call to tg3_nvram_read_be32().
3171 */
3172 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3173
3174 val = tr32(GRC_EEPROM_ADDR);
3175 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3176
3177 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3178 EEPROM_ADDR_READ);
3179 tw32(GRC_EEPROM_ADDR, val |
3180 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3181 (addr & EEPROM_ADDR_ADDR_MASK) |
3182 EEPROM_ADDR_START |
3183 EEPROM_ADDR_WRITE);
3184
3185 for (j = 0; j < 1000; j++) {
3186 val = tr32(GRC_EEPROM_ADDR);
3187
3188 if (val & EEPROM_ADDR_COMPLETE)
3189 break;
3190 msleep(1);
3191 }
3192 if (!(val & EEPROM_ADDR_COMPLETE)) {
3193 rc = -EBUSY;
3194 break;
3195 }
3196 }
3197
3198 return rc;
3199}
3200
3201/* offset and length are dword aligned */
3202static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3203 u8 *buf)
3204{
3205 int ret = 0;
3206 u32 pagesize = tp->nvram_pagesize;
3207 u32 pagemask = pagesize - 1;
3208 u32 nvram_cmd;
3209 u8 *tmp;
3210
3211 tmp = kmalloc(pagesize, GFP_KERNEL);
3212 if (tmp == NULL)
3213 return -ENOMEM;
3214
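 /* Unbuffered flash can only be rewritten a page at a time:
 * read the page into tmp, merge in the new bytes, erase the
 * page, then program it back one word at a time. */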
3215 while (len) {
3216 int j;
3217 u32 phy_addr, page_off, size;
3218
3219 phy_addr = offset & ~pagemask;
3220
3221 for (j = 0; j < pagesize; j += 4) {
3222 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3223 (__be32 *) (tmp + j));
3224 if (ret)
3225 break;
3226 }
3227 if (ret)
3228 break;
3229
3230 page_off = offset & pagemask;
3231 size = pagesize;
3232 if (len < size)
3233 size = len;
3234
3235 len -= size;
3236
3237 memcpy(tmp + page_off, buf, size);
3238
3239 offset = offset + (pagesize - page_off);
3240
3241 tg3_enable_nvram_access(tp);
3242
3243 /*
3244 * Before we can erase the flash page, we need
3245 * to issue a special "write enable" command.
3246 */
3247 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3248
3249 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3250 break;
3251
3252 /* Erase the target page */
3253 tw32(NVRAM_ADDR, phy_addr);
3254
3255 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3256 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3257
3258 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3259 break;
3260
3261 /* Issue another write enable to start the write. */
3262 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3263
3264 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3265 break;
3266
3267 for (j = 0; j < pagesize; j += 4) {
3268 __be32 data;
3269
3270 data = *((__be32 *) (tmp + j));
3271
3272 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3273
3274 tw32(NVRAM_ADDR, phy_addr + j);
3275
3276 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3277 NVRAM_CMD_WR;
3278
3279 if (j == 0)
3280 nvram_cmd |= NVRAM_CMD_FIRST;
3281 else if (j == (pagesize - 4))
3282 nvram_cmd |= NVRAM_CMD_LAST;
3283
3284 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3285 if (ret)
3286 break;
3287 }
3288 if (ret)
3289 break;
3290 }
3291
3292 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3293 tg3_nvram_exec_cmd(tp, nvram_cmd);
3294
3295 kfree(tmp);
3296
3297 return ret;
3298}
3299
3300/* offset and length are dword aligned */
3301static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3302 u8 *buf)
3303{
3304 int i, ret = 0;
3305
3306 for (i = 0; i < len; i += 4, offset += 4) {
3307 u32 page_off, phy_addr, nvram_cmd;
3308 __be32 data;
3309
3310 memcpy(&data, buf + i, 4);
3311 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3312
3313 page_off = offset % tp->nvram_pagesize;
3314
3315 phy_addr = tg3_nvram_phys_addr(tp, offset);
3316
dbe9b92a
MC
3317 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3318
3319 if (page_off == 0 || i == 0)
3320 nvram_cmd |= NVRAM_CMD_FIRST;
3321 if (page_off == (tp->nvram_pagesize - 4))
3322 nvram_cmd |= NVRAM_CMD_LAST;
3323
3324 if (i == (len - 4))
3325 nvram_cmd |= NVRAM_CMD_LAST;
3326
42278224
MC
3327 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3328 !tg3_flag(tp, FLASH) ||
3329 !tg3_flag(tp, 57765_PLUS))
3330 tw32(NVRAM_ADDR, phy_addr);
3331
dbe9b92a
MC
3332 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
3333 !tg3_flag(tp, 5755_PLUS) &&
3334 (tp->nvram_jedecnum == JEDEC_ST) &&
3335 (nvram_cmd & NVRAM_CMD_FIRST)) {
3336 u32 cmd;
3337
3338 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3339 ret = tg3_nvram_exec_cmd(tp, cmd);
3340 if (ret)
3341 break;
3342 }
3343 if (!tg3_flag(tp, FLASH)) {
3344 /* We always do complete word writes to eeprom. */
3345 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3346 }
3347
3348 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3349 if (ret)
3350 break;
3351 }
3352 return ret;
3353}
3354
3355/* offset and length are dword aligned */
3356static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3357{
3358 int ret;
3359
3360 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3361 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3362 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3363 udelay(40);
3364 }
3365
3366 if (!tg3_flag(tp, NVRAM)) {
3367 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3368 } else {
3369 u32 grc_mode;
3370
3371 ret = tg3_nvram_lock(tp);
3372 if (ret)
3373 return ret;
3374
3375 tg3_enable_nvram_access(tp);
3376 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3377 tw32(NVRAM_WRITE1, 0x406);
3378
3379 grc_mode = tr32(GRC_MODE);
3380 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3381
3382 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3383 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3384 buf);
3385 } else {
3386 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3387 buf);
3388 }
3389
3390 grc_mode = tr32(GRC_MODE);
3391 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3392
3393 tg3_disable_nvram_access(tp);
3394 tg3_nvram_unlock(tp);
3395 }
3396
3397 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3398 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3399 udelay(40);
3400 }
3401
3402 return ret;
3403}
3404
997b4f13
MC
3405#define RX_CPU_SCRATCH_BASE 0x30000
3406#define RX_CPU_SCRATCH_SIZE 0x04000
3407#define TX_CPU_SCRATCH_BASE 0x34000
3408#define TX_CPU_SCRATCH_SIZE 0x04000
3409
3410/* tp->lock is held. */
3411static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3412{
3413 int i;
3414
3415 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3416
3417 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3418 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3419
3420 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3421 return 0;
3422 }
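 /* The halt request may not latch on the first write, so keep
 * rewriting the state/mode registers until CPU_MODE_HALT reads
 * back set. */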
3423 if (offset == RX_CPU_BASE) {
3424 for (i = 0; i < 10000; i++) {
3425 tw32(offset + CPU_STATE, 0xffffffff);
3426 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3427 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3428 break;
3429 }
3430
3431 tw32(offset + CPU_STATE, 0xffffffff);
3432 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
3433 udelay(10);
3434 } else {
3435 for (i = 0; i < 10000; i++) {
3436 tw32(offset + CPU_STATE, 0xffffffff);
3437 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3438 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3439 break;
3440 }
3441 }
3442
3443 if (i >= 10000) {
3444 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3445 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3446 return -ENODEV;
3447 }
3448
3449 /* Clear firmware's nvram arbitration. */
3450 if (tg3_flag(tp, NVRAM))
3451 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3452 return 0;
3453}
3454
3455struct fw_info {
3456 unsigned int fw_base;
3457 unsigned int fw_len;
3458 const __be32 *fw_data;
3459};
3460
3461/* tp->lock is held. */
3462static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3463 u32 cpu_scratch_base, int cpu_scratch_size,
3464 struct fw_info *info)
3465{
3466 int err, lock_err, i;
3467 void (*write_op)(struct tg3 *, u32, u32);
3468
3469 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3470 netdev_err(tp->dev,
3471 "%s: Trying to load TX cpu firmware which is 5705\n",
3472 __func__);
3473 return -EINVAL;
3474 }
3475
3476 if (tg3_flag(tp, 5705_PLUS))
3477 write_op = tg3_write_mem;
3478 else
3479 write_op = tg3_write_indirect_reg32;
3480
3481 /* It is possible that bootcode is still loading at this point.
3482 * Get the nvram lock first before halting the cpu.
3483 */
3484 lock_err = tg3_nvram_lock(tp);
3485 err = tg3_halt_cpu(tp, cpu_base);
3486 if (!lock_err)
3487 tg3_nvram_unlock(tp);
3488 if (err)
3489 goto out;
3490
3491 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3492 write_op(tp, cpu_scratch_base + i, 0);
3493 tw32(cpu_base + CPU_STATE, 0xffffffff);
3494 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3495 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3496 write_op(tp, (cpu_scratch_base +
3497 (info->fw_base & 0xffff) +
3498 (i * sizeof(u32))),
3499 be32_to_cpu(info->fw_data[i]));
3500
3501 err = 0;
3502
3503out:
3504 return err;
3505}
3506
3507/* tp->lock is held. */
3508static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3509{
3510 struct fw_info info;
3511 const __be32 *fw_data;
3512 int err, i;
3513
3514 fw_data = (void *)tp->fw->data;
3515
 3516 /* Firmware blob starts with version numbers, followed by
 3517 * start address and length. We are setting complete length:
 3518 * length = end_address_of_bss - start_address_of_text.
 3519 * The remainder is the blob to be loaded contiguously
 3520 * from the start address. */
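 /* Concretely: fw_data[0] holds the firmware version, fw_data[1]
 * the load (start) address, fw_data[2] the stated length, and
 * the image itself begins at fw_data[3]. */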
3521
3522 info.fw_base = be32_to_cpu(fw_data[1]);
3523 info.fw_len = tp->fw->size - 12;
3524 info.fw_data = &fw_data[3];
3525
3526 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3527 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3528 &info);
3529 if (err)
3530 return err;
3531
3532 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3533 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3534 &info);
3535 if (err)
3536 return err;
3537
3538 /* Now startup only the RX cpu. */
3539 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3540 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3541
3542 for (i = 0; i < 5; i++) {
3543 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3544 break;
3545 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3546 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3547 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3548 udelay(1000);
3549 }
3550 if (i >= 5) {
 3551 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x, "
3552 "should be %08x\n", __func__,
3553 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3554 return -ENODEV;
3555 }
3556 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3557 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
3558
3559 return 0;
3560}
3561
3562/* tp->lock is held. */
3563static int tg3_load_tso_firmware(struct tg3 *tp)
3564{
3565 struct fw_info info;
3566 const __be32 *fw_data;
3567 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3568 int err, i;
3569
3570 if (tg3_flag(tp, HW_TSO_1) ||
3571 tg3_flag(tp, HW_TSO_2) ||
3572 tg3_flag(tp, HW_TSO_3))
3573 return 0;
3574
3575 fw_data = (void *)tp->fw->data;
3576
 3577 /* Firmware blob starts with version numbers, followed by
 3578 * start address and length. We are setting complete length:
 3579 * length = end_address_of_bss - start_address_of_text.
 3580 * The remainder is the blob to be loaded contiguously
 3581 * from the start address. */
3582
3583 info.fw_base = be32_to_cpu(fw_data[1]);
3584 cpu_scratch_size = tp->fw_len;
3585 info.fw_len = tp->fw->size - 12;
3586 info.fw_data = &fw_data[3];
3587
3588 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3589 cpu_base = RX_CPU_BASE;
3590 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3591 } else {
3592 cpu_base = TX_CPU_BASE;
3593 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3594 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3595 }
3596
3597 err = tg3_load_firmware_cpu(tp, cpu_base,
3598 cpu_scratch_base, cpu_scratch_size,
3599 &info);
3600 if (err)
3601 return err;
3602
3603 /* Now startup the cpu. */
3604 tw32(cpu_base + CPU_STATE, 0xffffffff);
3605 tw32_f(cpu_base + CPU_PC, info.fw_base);
3606
3607 for (i = 0; i < 5; i++) {
3608 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3609 break;
3610 tw32(cpu_base + CPU_STATE, 0xffffffff);
3611 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3612 tw32_f(cpu_base + CPU_PC, info.fw_base);
3613 udelay(1000);
3614 }
3615 if (i >= 5) {
3616 netdev_err(tp->dev,
3617 "%s fails to set CPU PC, is %08x should be %08x\n",
3618 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3619 return -ENODEV;
3620 }
3621 tw32(cpu_base + CPU_STATE, 0xffffffff);
3622 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3623 return 0;
3624}
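/* Both loaders use the same start-up handshake: point the CPU's PC at
 * the firmware base and retry up to five times, halting the CPU and
 * rewriting the PC on each pass, before giving up.  A standalone
 * sketch of that loop (hypothetical helper, not the driver's API):
 */
static int tg3_wait_cpu_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
{
	int i;

	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == pc)
			return 0;	/* CPU accepted the new PC */
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, pc);
		udelay(1000);
	}
	return -ENODEV;
}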
3625
3626
3f007891
MC
3627/* tp->lock is held. */
3628static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3629{
3630 u32 addr_high, addr_low;
3631 int i;
3632
3633 addr_high = ((tp->dev->dev_addr[0] << 8) |
3634 tp->dev->dev_addr[1]);
3635 addr_low = ((tp->dev->dev_addr[2] << 24) |
3636 (tp->dev->dev_addr[3] << 16) |
3637 (tp->dev->dev_addr[4] << 8) |
3638 (tp->dev->dev_addr[5] << 0));
3639 for (i = 0; i < 4; i++) {
3640 if (i == 1 && skip_mac_1)
3641 continue;
3642 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3643 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3644 }
3645
3646 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3647 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3648 for (i = 0; i < 12; i++) {
3649 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3650 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3651 }
3652 }
3653
3654 addr_high = (tp->dev->dev_addr[0] +
3655 tp->dev->dev_addr[1] +
3656 tp->dev->dev_addr[2] +
3657 tp->dev->dev_addr[3] +
3658 tp->dev->dev_addr[4] +
3659 tp->dev->dev_addr[5]) &
3660 TX_BACKOFF_SEED_MASK;
3661 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3662}
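/* Worked example of the packing above (address invented for
 * illustration): for dev_addr 00:10:18:aa:bb:cc,
 *
 *	addr_high = 0x00000010	(bytes 0-1)
 *	addr_low  = 0x18aabbcc	(bytes 2-5)
 *
 * The same pair is mirrored into all four MAC_ADDR slots (and the
 * twelve extended slots on 5703/5704), while the backoff seed is
 * simply the byte sum masked with TX_BACKOFF_SEED_MASK.
 */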
3663
c866b7ea 3664static void tg3_enable_register_access(struct tg3 *tp)
1da177e4 3665{
c866b7ea
RW
3666 /*
3667 * Make sure register accesses (indirect or otherwise) will function
3668 * correctly.
1da177e4
LT
3669 */
3670 pci_write_config_dword(tp->pdev,
c866b7ea
RW
3671 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3672}
1da177e4 3673
c866b7ea
RW
3674static int tg3_power_up(struct tg3 *tp)
3675{
bed9829f 3676 int err;
8c6bda1a 3677
bed9829f 3678 tg3_enable_register_access(tp);
1da177e4 3679
bed9829f
MC
3680 err = pci_set_power_state(tp->pdev, PCI_D0);
3681 if (!err) {
3682 /* Switch out of Vaux if it is a NIC */
3683 tg3_pwrsrc_switch_to_vmain(tp);
3684 } else {
3685 netdev_err(tp->dev, "Transition to D0 failed\n");
3686 }
1da177e4 3687
bed9829f 3688 return err;
c866b7ea 3689}
1da177e4 3690
4b409522
MC
3691static int tg3_setup_phy(struct tg3 *, int);
3692
c866b7ea
RW
3693static int tg3_power_down_prepare(struct tg3 *tp)
3694{
3695 u32 misc_host_ctrl;
3696 bool device_should_wake, do_low_power;
3697
3698 tg3_enable_register_access(tp);
5e7dfd0f
MC
3699
3700 /* Restore the CLKREQ setting. */
0f49bfbd
JL
3701 if (tg3_flag(tp, CLKREQ_BUG))
3702 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3703 PCI_EXP_LNKCTL_CLKREQ_EN);
5e7dfd0f 3704
1da177e4
LT
3705 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3706 tw32(TG3PCI_MISC_HOST_CTRL,
3707 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3708
c866b7ea 3709 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
63c3a66f 3710 tg3_flag(tp, WOL_ENABLE);
05ac4cb7 3711
63c3a66f 3712 if (tg3_flag(tp, USE_PHYLIB)) {
0a459aac 3713 do_low_power = false;
f07e9af3 3714 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
80096068 3715 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
b02fd9e3 3716 struct phy_device *phydev;
0a459aac 3717 u32 phyid, advertising;
b02fd9e3 3718
3f0e3ad7 3719 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
b02fd9e3 3720
80096068 3721 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
b02fd9e3 3722
c6700ce2
MC
3723 tp->link_config.speed = phydev->speed;
3724 tp->link_config.duplex = phydev->duplex;
3725 tp->link_config.autoneg = phydev->autoneg;
3726 tp->link_config.advertising = phydev->advertising;
b02fd9e3
MC
3727
3728 advertising = ADVERTISED_TP |
3729 ADVERTISED_Pause |
3730 ADVERTISED_Autoneg |
3731 ADVERTISED_10baseT_Half;
3732
63c3a66f
JP
3733 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3734 if (tg3_flag(tp, WOL_SPEED_100MB))
b02fd9e3
MC
3735 advertising |=
3736 ADVERTISED_100baseT_Half |
3737 ADVERTISED_100baseT_Full |
3738 ADVERTISED_10baseT_Full;
3739 else
3740 advertising |= ADVERTISED_10baseT_Full;
3741 }
3742
3743 phydev->advertising = advertising;
3744
3745 phy_start_aneg(phydev);
0a459aac
MC
3746
3747 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
6a443a0f
MC
3748 if (phyid != PHY_ID_BCMAC131) {
3749 phyid &= PHY_BCM_OUI_MASK;
3750 if (phyid == PHY_BCM_OUI_1 ||
3751 phyid == PHY_BCM_OUI_2 ||
3752 phyid == PHY_BCM_OUI_3)
0a459aac
MC
3753 do_low_power = true;
3754 }
b02fd9e3 3755 }
dd477003 3756 } else {
2023276e 3757 do_low_power = true;
0a459aac 3758
c6700ce2 3759 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
80096068 3760 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
1da177e4 3761
2855b9fe 3762 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
dd477003 3763 tg3_setup_phy(tp, 0);
1da177e4
LT
3764 }
3765
b5d3772c
MC
3766 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3767 u32 val;
3768
3769 val = tr32(GRC_VCPU_EXT_CTRL);
3770 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
63c3a66f 3771 } else if (!tg3_flag(tp, ENABLE_ASF)) {
6921d201
MC
3772 int i;
3773 u32 val;
3774
3775 for (i = 0; i < 200; i++) {
3776 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3777 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3778 break;
3779 msleep(1);
3780 }
3781 }
63c3a66f 3782 if (tg3_flag(tp, WOL_CAP))
a85feb8c
GZ
3783 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3784 WOL_DRV_STATE_SHUTDOWN |
3785 WOL_DRV_WOL |
3786 WOL_SET_MAGIC_PKT);
6921d201 3787
05ac4cb7 3788 if (device_should_wake) {
1da177e4
LT
3789 u32 mac_mode;
3790
f07e9af3 3791 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
b4bd2929
MC
3792 if (do_low_power &&
3793 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3794 tg3_phy_auxctl_write(tp,
3795 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3796 MII_TG3_AUXCTL_PCTL_WOL_EN |
3797 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3798 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
dd477003
MC
3799 udelay(40);
3800 }
1da177e4 3801
f07e9af3 3802 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3f7045c1
MC
3803 mac_mode = MAC_MODE_PORT_MODE_GMII;
3804 else
3805 mac_mode = MAC_MODE_PORT_MODE_MII;
1da177e4 3806
e8f3f6ca
MC
3807 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3808 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3809 ASIC_REV_5700) {
63c3a66f 3810 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
e8f3f6ca
MC
3811 SPEED_100 : SPEED_10;
3812 if (tg3_5700_link_polarity(tp, speed))
3813 mac_mode |= MAC_MODE_LINK_POLARITY;
3814 else
3815 mac_mode &= ~MAC_MODE_LINK_POLARITY;
3816 }
1da177e4
LT
3817 } else {
3818 mac_mode = MAC_MODE_PORT_MODE_TBI;
3819 }
3820
63c3a66f 3821 if (!tg3_flag(tp, 5750_PLUS))
1da177e4
LT
3822 tw32(MAC_LED_CTRL, tp->led_ctrl);
3823
05ac4cb7 3824 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
63c3a66f
JP
3825 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3826 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
05ac4cb7 3827 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
1da177e4 3828
63c3a66f 3829 if (tg3_flag(tp, ENABLE_APE))
d2394e6b
MC
3830 mac_mode |= MAC_MODE_APE_TX_EN |
3831 MAC_MODE_APE_RX_EN |
3832 MAC_MODE_TDE_ENABLE;
3bda1258 3833
1da177e4
LT
3834 tw32_f(MAC_MODE, mac_mode);
3835 udelay(100);
3836
3837 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3838 udelay(10);
3839 }
3840
63c3a66f 3841 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
1da177e4
LT
3842 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3843 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3844 u32 base_val;
3845
3846 base_val = tp->pci_clock_ctrl;
3847 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3848 CLOCK_CTRL_TXCLK_DISABLE);
3849
b401e9e2
MC
3850 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3851 CLOCK_CTRL_PWRDOWN_PLL133, 40);
63c3a66f
JP
3852 } else if (tg3_flag(tp, 5780_CLASS) ||
3853 tg3_flag(tp, CPMU_PRESENT) ||
6ff6f81d 3854 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
4cf78e4f 3855 /* do nothing */
63c3a66f 3856 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
1da177e4
LT
3857 u32 newbits1, newbits2;
3858
3859 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3860 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3861 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3862 CLOCK_CTRL_TXCLK_DISABLE |
3863 CLOCK_CTRL_ALTCLK);
3864 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
63c3a66f 3865 } else if (tg3_flag(tp, 5705_PLUS)) {
1da177e4
LT
3866 newbits1 = CLOCK_CTRL_625_CORE;
3867 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3868 } else {
3869 newbits1 = CLOCK_CTRL_ALTCLK;
3870 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3871 }
3872
b401e9e2
MC
3873 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3874 40);
1da177e4 3875
b401e9e2
MC
3876 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3877 40);
1da177e4 3878
63c3a66f 3879 if (!tg3_flag(tp, 5705_PLUS)) {
1da177e4
LT
3880 u32 newbits3;
3881
3882 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3883 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3884 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3885 CLOCK_CTRL_TXCLK_DISABLE |
3886 CLOCK_CTRL_44MHZ_CORE);
3887 } else {
3888 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3889 }
3890
b401e9e2
MC
3891 tw32_wait_f(TG3PCI_CLOCK_CTRL,
3892 tp->pci_clock_ctrl | newbits3, 40);
1da177e4
LT
3893 }
3894 }
3895
63c3a66f 3896 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
0a459aac 3897 tg3_power_down_phy(tp, do_low_power);
6921d201 3898
cd0d7228 3899 tg3_frob_aux_power(tp, true);
1da177e4
LT
3900
3901 /* Workaround for unstable PLL clock */
3902 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3903 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3904 u32 val = tr32(0x7d00);
3905
3906 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3907 tw32(0x7d00, val);
63c3a66f 3908 if (!tg3_flag(tp, ENABLE_ASF)) {
ec41c7df
MC
3909 int err;
3910
3911 err = tg3_nvram_lock(tp);
1da177e4 3912 tg3_halt_cpu(tp, RX_CPU_BASE);
ec41c7df
MC
3913 if (!err)
3914 tg3_nvram_unlock(tp);
6921d201 3915 }
1da177e4
LT
3916 }
3917
bbadf503
MC
3918 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3919
c866b7ea
RW
3920 return 0;
3921}
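/* Shutdown order enforced above, summarized: restore CLKREQ, mask PCI
 * interrupts, drop the PHY to a low-power advertisement (or hand the
 * job to phylib), post the WOL mailbox, build a WOL-capable MAC_MODE,
 * gate the RX/TX clocks where the chip allows it, power down the PHY
 * when nothing needs to wake, switch to auxiliary power, and finally
 * write the RESET_KIND_SHUTDOWN signature.
 */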
12dac075 3922
c866b7ea
RW
3923static void tg3_power_down(struct tg3 *tp)
3924{
3925 tg3_power_down_prepare(tp);
1da177e4 3926
63c3a66f 3927 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
c866b7ea 3928 pci_set_power_state(tp->pdev, PCI_D3hot);
1da177e4
LT
3929}
3930
1da177e4
LT
3931static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3932{
3933 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3934 case MII_TG3_AUX_STAT_10HALF:
3935 *speed = SPEED_10;
3936 *duplex = DUPLEX_HALF;
3937 break;
3938
3939 case MII_TG3_AUX_STAT_10FULL:
3940 *speed = SPEED_10;
3941 *duplex = DUPLEX_FULL;
3942 break;
3943
3944 case MII_TG3_AUX_STAT_100HALF:
3945 *speed = SPEED_100;
3946 *duplex = DUPLEX_HALF;
3947 break;
3948
3949 case MII_TG3_AUX_STAT_100FULL:
3950 *speed = SPEED_100;
3951 *duplex = DUPLEX_FULL;
3952 break;
3953
3954 case MII_TG3_AUX_STAT_1000HALF:
3955 *speed = SPEED_1000;
3956 *duplex = DUPLEX_HALF;
3957 break;
3958
3959 case MII_TG3_AUX_STAT_1000FULL:
3960 *speed = SPEED_1000;
3961 *duplex = DUPLEX_FULL;
3962 break;
3963
3964 default:
f07e9af3 3965 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
715116a1
MC
3966 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3967 SPEED_10;
3968 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3969 DUPLEX_HALF;
3970 break;
3971 }
e740522e
MC
3972 *speed = SPEED_UNKNOWN;
3973 *duplex = DUPLEX_UNKNOWN;
1da177e4 3974 break;
855e1111 3975 }
1da177e4
LT
3976}
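/* Decode example for the switch above: an aux status of
 * MII_TG3_AUX_STAT_1000FULL yields SPEED_1000/DUPLEX_FULL.  FET PHYs
 * report only two bits, MII_TG3_AUX_STAT_100 (100 vs 10) and
 * MII_TG3_AUX_STAT_FULL (full vs half); anything unrecognized comes
 * back as SPEED_UNKNOWN/DUPLEX_UNKNOWN.
 */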
3977
42b64a45 3978static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
1da177e4 3979{
42b64a45
MC
3980 int err = 0;
3981 u32 val, new_adv;
1da177e4 3982
42b64a45 3983 new_adv = ADVERTISE_CSMA;
202ff1c2 3984 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
f88788f0 3985 new_adv |= mii_advertise_flowctrl(flowctrl);
1da177e4 3986
42b64a45
MC
3987 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3988 if (err)
3989 goto done;
ba4d07a8 3990
4f272096
MC
3991 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3992 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
ba4d07a8 3993
4f272096
MC
3994 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3995 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3996 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
ba4d07a8 3997
4f272096
MC
3998 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3999 if (err)
4000 goto done;
4001 }
1da177e4 4002
42b64a45
MC
4003 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4004 goto done;
52b02d04 4005
42b64a45
MC
4006 tw32(TG3_CPMU_EEE_MODE,
4007 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
52b02d04 4008
42b64a45
MC
4009 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
4010 if (!err) {
4011 u32 err2;
52b02d04 4012
b715ce94
MC
4013 val = 0;
4014 /* Advertise 100BASE-TX EEE ability */
4015 if (advertise & ADVERTISED_100baseT_Full)
4016 val |= MDIO_AN_EEE_ADV_100TX;
4017 /* Advertise 1000BASE-T EEE ability */
4018 if (advertise & ADVERTISED_1000baseT_Full)
4019 val |= MDIO_AN_EEE_ADV_1000T;
4020 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4021 if (err)
4022 val = 0;
4023
21a00ab2
MC
4024 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
4025 case ASIC_REV_5717:
4026 case ASIC_REV_57765:
55086ad9 4027 case ASIC_REV_57766:
21a00ab2 4028 case ASIC_REV_5719:
b715ce94
MC
4029 /* If we advertised any EEE abilities above... */
4030 if (val)
4031 val = MII_TG3_DSP_TAP26_ALNOKO |
4032 MII_TG3_DSP_TAP26_RMRXSTO |
4033 MII_TG3_DSP_TAP26_OPCSINPT;
21a00ab2 4034 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
be671947
MC
4035 /* Fall through */
4036 case ASIC_REV_5720:
4037 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4038 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4039 MII_TG3_DSP_CH34TP2_HIBW01);
21a00ab2 4040 }
52b02d04 4041
42b64a45
MC
4042 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
4043 if (!err)
4044 err = err2;
4045 }
4046
4047done:
4048 return err;
4049}
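/* The ethtool -> MII translation above, as a worked example (values
 * illustrative; the helpers are the stock <linux/mii.h> ones):
 *
 *	u32 adv = ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Full;
 *	u32 reg = ADVERTISE_CSMA |
 *		  (ethtool_adv_to_mii_adv_t(adv) & ADVERTISE_ALL) |
 *		  mii_advertise_flowctrl(FLOW_CTRL_TX | FLOW_CTRL_RX);
 *
 * leaves reg == ADVERTISE_CSMA | ADVERTISE_10FULL | ADVERTISE_100FULL
 * | ADVERTISE_PAUSE_CAP, which is what lands in MII_ADVERTISE.
 */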
4050
4051static void tg3_phy_copper_begin(struct tg3 *tp)
4052{
d13ba512
MC
4053 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4054 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4055 u32 adv, fc;
4056
4057 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
4058 adv = ADVERTISED_10baseT_Half |
4059 ADVERTISED_10baseT_Full;
4060 if (tg3_flag(tp, WOL_SPEED_100MB))
4061 adv |= ADVERTISED_100baseT_Half |
4062 ADVERTISED_100baseT_Full;
4063
4064 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
42b64a45 4065 } else {
d13ba512
MC
4066 adv = tp->link_config.advertising;
4067 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4068 adv &= ~(ADVERTISED_1000baseT_Half |
4069 ADVERTISED_1000baseT_Full);
4070
4071 fc = tp->link_config.flowctrl;
52b02d04 4072 }
52b02d04 4073
d13ba512 4074 tg3_phy_autoneg_cfg(tp, adv, fc);
52b02d04 4075
d13ba512
MC
4076 tg3_writephy(tp, MII_BMCR,
4077 BMCR_ANENABLE | BMCR_ANRESTART);
4078 } else {
4079 int i;
1da177e4
LT
4080 u32 bmcr, orig_bmcr;
4081
4082 tp->link_config.active_speed = tp->link_config.speed;
4083 tp->link_config.active_duplex = tp->link_config.duplex;
4084
4085 bmcr = 0;
4086 switch (tp->link_config.speed) {
4087 default:
4088 case SPEED_10:
4089 break;
4090
4091 case SPEED_100:
4092 bmcr |= BMCR_SPEED100;
4093 break;
4094
4095 case SPEED_1000:
221c5637 4096 bmcr |= BMCR_SPEED1000;
1da177e4 4097 break;
855e1111 4098 }
1da177e4
LT
4099
4100 if (tp->link_config.duplex == DUPLEX_FULL)
4101 bmcr |= BMCR_FULLDPLX;
4102
4103 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4104 (bmcr != orig_bmcr)) {
4105 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4106 for (i = 0; i < 1500; i++) {
4107 u32 tmp;
4108
4109 udelay(10);
4110 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4111 tg3_readphy(tp, MII_BMSR, &tmp))
4112 continue;
4113 if (!(tmp & BMSR_LSTATUS)) {
4114 udelay(40);
4115 break;
4116 }
4117 }
4118 tg3_writephy(tp, MII_BMCR, bmcr);
4119 udelay(40);
4120 }
1da177e4
LT
4121 }
4122}
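/* Forced-mode BMCR encoding used on the else path above (a worked
 * example, not from the source): 100 Mb/s full duplex is
 * BMCR_SPEED100 | BMCR_FULLDPLX, 1000 Mb/s uses BMCR_SPEED1000
 * instead, and autoneg stays off because BMCR_ANENABLE is never set
 * on this path.
 */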
4123
4124static int tg3_init_5401phy_dsp(struct tg3 *tp)
4125{
4126 int err;
4127
4128 /* Turn off tap power management. */
4129 /* Set Extended packet length bit */
b4bd2929 4130 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
1da177e4 4131
6ee7c0a0
MC
4132 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4133 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4134 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4135 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4136 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
1da177e4
LT
4137
4138 udelay(40);
4139
4140 return err;
4141}
4142
e2bf73e7 4143static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
1da177e4 4144{
e2bf73e7 4145 u32 advmsk, tgtadv, advertising;
3600d918 4146
e2bf73e7
MC
4147 advertising = tp->link_config.advertising;
4148 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
1da177e4 4149
e2bf73e7
MC
4150 advmsk = ADVERTISE_ALL;
4151 if (tp->link_config.active_duplex == DUPLEX_FULL) {
f88788f0 4152 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
e2bf73e7
MC
4153 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4154 }
1da177e4 4155
e2bf73e7
MC
4156 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4157 return false;
4158
4159 if ((*lcladv & advmsk) != tgtadv)
4160 return false;
b99d2a57 4161
f07e9af3 4162 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1da177e4
LT
4163 u32 tg3_ctrl;
4164
e2bf73e7 4165 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
3600d918 4166
221c5637 4167 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
e2bf73e7 4168 return false;
1da177e4 4169
3198e07f
MC
4170 if (tgtadv &&
4171 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4172 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4173 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4174 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4175 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4176 } else {
4177 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4178 }
4179
e2bf73e7
MC
4180 if (tg3_ctrl != tgtadv)
4181 return false;
ef167e27
MC
4182 }
4183
e2bf73e7 4184 return true;
ef167e27
MC
4185}
4186
859edb26
MC
4187static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4188{
4189 u32 lpeth = 0;
4190
4191 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4192 u32 val;
4193
4194 if (tg3_readphy(tp, MII_STAT1000, &val))
4195 return false;
4196
4197 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4198 }
4199
4200 if (tg3_readphy(tp, MII_LPA, rmtadv))
4201 return false;
4202
4203 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4204 tp->link_config.rmt_adv = lpeth;
4205
4206 return true;
4207}
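/* Worked example for the fetch above (illustrative partner): a link
 * partner advertising LPA_1000FULL in MII_STAT1000 plus LPA_100FULL
 * and LPA_PAUSE_CAP in MII_LPA leaves rmt_adv holding
 * ADVERTISED_1000baseT_Full | ADVERTISED_100baseT_Full |
 * ADVERTISED_Pause; the mii_*_to_ethtool_lpa_t helpers do the bit
 * translation.
 */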
4208
f4a46d1f
NNS
4209static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
4210{
4211 if (curr_link_up != tp->link_up) {
4212 if (curr_link_up) {
4213 tg3_carrier_on(tp);
4214 } else {
4215 tg3_carrier_off(tp);
4216 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4217 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4218 }
4219
4220 tg3_link_report(tp);
4221 return true;
4222 }
4223
4224 return false;
4225}
4226
1da177e4
LT
4227static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4228{
4229 int current_link_up;
f833c4c1 4230 u32 bmsr, val;
ef167e27 4231 u32 lcl_adv, rmt_adv;
1da177e4
LT
4232 u16 current_speed;
4233 u8 current_duplex;
4234 int i, err;
4235
4236 tw32(MAC_EVENT, 0);
4237
4238 tw32_f(MAC_STATUS,
4239 (MAC_STATUS_SYNC_CHANGED |
4240 MAC_STATUS_CFG_CHANGED |
4241 MAC_STATUS_MI_COMPLETION |
4242 MAC_STATUS_LNKSTATE_CHANGED));
4243 udelay(40);
4244
8ef21428
MC
4245 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4246 tw32_f(MAC_MI_MODE,
4247 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4248 udelay(80);
4249 }
1da177e4 4250
b4bd2929 4251 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
1da177e4
LT
4252
4253 /* Some third-party PHYs need to be reset when the link
4254 * goes down.
4255 */
4256 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4257 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4258 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
f4a46d1f 4259 tp->link_up) {
1da177e4
LT
4260 tg3_readphy(tp, MII_BMSR, &bmsr);
4261 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4262 !(bmsr & BMSR_LSTATUS))
4263 force_reset = 1;
4264 }
4265 if (force_reset)
4266 tg3_phy_reset(tp);
4267
79eb6904 4268 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1da177e4
LT
4269 tg3_readphy(tp, MII_BMSR, &bmsr);
4270 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
63c3a66f 4271 !tg3_flag(tp, INIT_COMPLETE))
1da177e4
LT
4272 bmsr = 0;
4273
4274 if (!(bmsr & BMSR_LSTATUS)) {
4275 err = tg3_init_5401phy_dsp(tp);
4276 if (err)
4277 return err;
4278
4279 tg3_readphy(tp, MII_BMSR, &bmsr);
4280 for (i = 0; i < 1000; i++) {
4281 udelay(10);
4282 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4283 (bmsr & BMSR_LSTATUS)) {
4284 udelay(40);
4285 break;
4286 }
4287 }
4288
79eb6904
MC
4289 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4290 TG3_PHY_REV_BCM5401_B0 &&
1da177e4
LT
4291 !(bmsr & BMSR_LSTATUS) &&
4292 tp->link_config.active_speed == SPEED_1000) {
4293 err = tg3_phy_reset(tp);
4294 if (!err)
4295 err = tg3_init_5401phy_dsp(tp);
4296 if (err)
4297 return err;
4298 }
4299 }
4300 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4301 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
4302 /* 5701 {A0,B0} CRC bug workaround */
4303 tg3_writephy(tp, 0x15, 0x0a75);
f08aa1a8
MC
4304 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4305 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4306 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
1da177e4
LT
4307 }
4308
4309 /* Clear pending interrupts... */
f833c4c1
MC
4310 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4311 tg3_readphy(tp, MII_TG3_ISTAT, &val);
1da177e4 4312
f07e9af3 4313 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
1da177e4 4314 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
f07e9af3 4315 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
1da177e4
LT
4316 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4317
4318 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4319 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
4320 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4321 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4322 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4323 else
4324 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4325 }
4326
4327 current_link_up = 0;
e740522e
MC
4328 current_speed = SPEED_UNKNOWN;
4329 current_duplex = DUPLEX_UNKNOWN;
e348c5e7 4330 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
859edb26 4331 tp->link_config.rmt_adv = 0;
1da177e4 4332
f07e9af3 4333 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
15ee95c3
MC
4334 err = tg3_phy_auxctl_read(tp,
4335 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4336 &val);
4337 if (!err && !(val & (1 << 10))) {
b4bd2929
MC
4338 tg3_phy_auxctl_write(tp,
4339 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4340 val | (1 << 10));
1da177e4
LT
4341 goto relink;
4342 }
4343 }
4344
4345 bmsr = 0;
4346 for (i = 0; i < 100; i++) {
4347 tg3_readphy(tp, MII_BMSR, &bmsr);
4348 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4349 (bmsr & BMSR_LSTATUS))
4350 break;
4351 udelay(40);
4352 }
4353
4354 if (bmsr & BMSR_LSTATUS) {
4355 u32 aux_stat, bmcr;
4356
4357 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4358 for (i = 0; i < 2000; i++) {
4359 udelay(10);
4360 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4361 aux_stat)
4362 break;
4363 }
4364
4365 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4366 &current_speed,
4367 &current_duplex);
4368
4369 bmcr = 0;
4370 for (i = 0; i < 200; i++) {
4371 tg3_readphy(tp, MII_BMCR, &bmcr);
4372 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4373 continue;
4374 if (bmcr && bmcr != 0x7fff)
4375 break;
4376 udelay(10);
4377 }
4378
ef167e27
MC
4379 lcl_adv = 0;
4380 rmt_adv = 0;
1da177e4 4381
ef167e27
MC
4382 tp->link_config.active_speed = current_speed;
4383 tp->link_config.active_duplex = current_duplex;
4384
4385 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4386 if ((bmcr & BMCR_ANENABLE) &&
e2bf73e7 4387 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
859edb26 4388 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
e2bf73e7 4389 current_link_up = 1;
1da177e4
LT
4390 } else {
4391 if (!(bmcr & BMCR_ANENABLE) &&
4392 tp->link_config.speed == current_speed &&
ef167e27
MC
4393 tp->link_config.duplex == current_duplex &&
4394 tp->link_config.flowctrl ==
4395 tp->link_config.active_flowctrl) {
1da177e4 4396 current_link_up = 1;
1da177e4
LT
4397 }
4398 }
4399
ef167e27 4400 if (current_link_up == 1 &&
e348c5e7
MC
4401 tp->link_config.active_duplex == DUPLEX_FULL) {
4402 u32 reg, bit;
4403
4404 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4405 reg = MII_TG3_FET_GEN_STAT;
4406 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4407 } else {
4408 reg = MII_TG3_EXT_STAT;
4409 bit = MII_TG3_EXT_STAT_MDIX;
4410 }
4411
4412 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4413 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4414
ef167e27 4415 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
e348c5e7 4416 }
1da177e4
LT
4417 }
4418
1da177e4 4419relink:
80096068 4420 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
1da177e4
LT
4421 tg3_phy_copper_begin(tp);
4422
f833c4c1 4423 tg3_readphy(tp, MII_BMSR, &bmsr);
06c03c02
MB
4424 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4425 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
1da177e4
LT
4426 current_link_up = 1;
4427 }
4428
4429 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4430 if (current_link_up == 1) {
4431 if (tp->link_config.active_speed == SPEED_100 ||
4432 tp->link_config.active_speed == SPEED_10)
4433 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4434 else
4435 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
f07e9af3 4436 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7f97a4bd
MC
4437 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4438 else
1da177e4
LT
4439 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4440
4441 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4442 if (tp->link_config.active_duplex == DUPLEX_HALF)
4443 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4444
1da177e4 4445 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
e8f3f6ca
MC
4446 if (current_link_up == 1 &&
4447 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
1da177e4 4448 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
e8f3f6ca
MC
4449 else
4450 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1da177e4
LT
4451 }
4452
4453 /* ??? Without this setting Netgear GA302T PHY does not
4454 * ??? send/receive packets...
4455 */
79eb6904 4456 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
1da177e4
LT
4457 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4458 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4459 tw32_f(MAC_MI_MODE, tp->mi_mode);
4460 udelay(80);
4461 }
4462
4463 tw32_f(MAC_MODE, tp->mac_mode);
4464 udelay(40);
4465
52b02d04
MC
4466 tg3_phy_eee_adjust(tp, current_link_up);
4467
63c3a66f 4468 if (tg3_flag(tp, USE_LINKCHG_REG)) {
1da177e4
LT
4469 /* Polled via timer. */
4470 tw32_f(MAC_EVENT, 0);
4471 } else {
4472 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4473 }
4474 udelay(40);
4475
4476 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4477 current_link_up == 1 &&
4478 tp->link_config.active_speed == SPEED_1000 &&
63c3a66f 4479 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
1da177e4
LT
4480 udelay(120);
4481 tw32_f(MAC_STATUS,
4482 (MAC_STATUS_SYNC_CHANGED |
4483 MAC_STATUS_CFG_CHANGED));
4484 udelay(40);
4485 tg3_write_mem(tp,
4486 NIC_SRAM_FIRMWARE_MBOX,
4487 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4488 }
4489
5e7dfd0f 4490 /* Prevent send BD corruption. */
63c3a66f 4491 if (tg3_flag(tp, CLKREQ_BUG)) {
5e7dfd0f
MC
4492 if (tp->link_config.active_speed == SPEED_100 ||
4493 tp->link_config.active_speed == SPEED_10)
0f49bfbd
JL
4494 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4495 PCI_EXP_LNKCTL_CLKREQ_EN);
5e7dfd0f 4496 else
0f49bfbd
JL
4497 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4498 PCI_EXP_LNKCTL_CLKREQ_EN);
5e7dfd0f
MC
4499 }
4500
f4a46d1f 4501 tg3_test_and_report_link_chg(tp, current_link_up);
1da177e4
LT
4502
4503 return 0;
4504}
4505
4506struct tg3_fiber_aneginfo {
4507 int state;
4508#define ANEG_STATE_UNKNOWN 0
4509#define ANEG_STATE_AN_ENABLE 1
4510#define ANEG_STATE_RESTART_INIT 2
4511#define ANEG_STATE_RESTART 3
4512#define ANEG_STATE_DISABLE_LINK_OK 4
4513#define ANEG_STATE_ABILITY_DETECT_INIT 5
4514#define ANEG_STATE_ABILITY_DETECT 6
4515#define ANEG_STATE_ACK_DETECT_INIT 7
4516#define ANEG_STATE_ACK_DETECT 8
4517#define ANEG_STATE_COMPLETE_ACK_INIT 9
4518#define ANEG_STATE_COMPLETE_ACK 10
4519#define ANEG_STATE_IDLE_DETECT_INIT 11
4520#define ANEG_STATE_IDLE_DETECT 12
4521#define ANEG_STATE_LINK_OK 13
4522#define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
4523#define ANEG_STATE_NEXT_PAGE_WAIT 15
4524
4525 u32 flags;
4526#define MR_AN_ENABLE 0x00000001
4527#define MR_RESTART_AN 0x00000002
4528#define MR_AN_COMPLETE 0x00000004
4529#define MR_PAGE_RX 0x00000008
4530#define MR_NP_LOADED 0x00000010
4531#define MR_TOGGLE_TX 0x00000020
4532#define MR_LP_ADV_FULL_DUPLEX 0x00000040
4533#define MR_LP_ADV_HALF_DUPLEX 0x00000080
4534#define MR_LP_ADV_SYM_PAUSE 0x00000100
4535#define MR_LP_ADV_ASYM_PAUSE 0x00000200
4536#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4537#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4538#define MR_LP_ADV_NEXT_PAGE 0x00001000
4539#define MR_TOGGLE_RX 0x00002000
4540#define MR_NP_RX 0x00004000
4541
4542#define MR_LINK_OK 0x80000000
4543
4544 unsigned long link_time, cur_time;
4545
4546 u32 ability_match_cfg;
4547 int ability_match_count;
4548
4549 char ability_match, idle_match, ack_match;
4550
4551 u32 txconfig, rxconfig;
4552#define ANEG_CFG_NP 0x00000080
4553#define ANEG_CFG_ACK 0x00000040
4554#define ANEG_CFG_RF2 0x00000020
4555#define ANEG_CFG_RF1 0x00000010
4556#define ANEG_CFG_PS2 0x00000001
4557#define ANEG_CFG_PS1 0x00008000
4558#define ANEG_CFG_HD 0x00004000
4559#define ANEG_CFG_FD 0x00002000
4560#define ANEG_CFG_INVAL 0x00001f06
4561
4562};
4563#define ANEG_OK 0
4564#define ANEG_DONE 1
4565#define ANEG_TIMER_ENAB 2
4566#define ANEG_FAILED -1
4567
4568#define ANEG_STATE_SETTLE_TIME 10000
4569
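/* Typical progression through the state machine below for a
 * successful autoneg (a summary, not exhaustive):
 *
 *	AN_ENABLE -> RESTART_INIT -> RESTART
 *	  -> ABILITY_DETECT_INIT -> ABILITY_DETECT
 *	  -> ACK_DETECT_INIT -> ACK_DETECT
 *	  -> COMPLETE_ACK_INIT -> COMPLETE_ACK
 *	  -> IDLE_DETECT_INIT -> IDLE_DETECT -> LINK_OK
 *
 * States that return ANEG_TIMER_ENAB are re-entered by the caller
 * until ANEG_STATE_SETTLE_TIME ticks have elapsed.
 */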
4570static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4571 struct tg3_fiber_aneginfo *ap)
4572{
5be73b47 4573 u16 flowctrl;
1da177e4
LT
4574 unsigned long delta;
4575 u32 rx_cfg_reg;
4576 int ret;
4577
4578 if (ap->state == ANEG_STATE_UNKNOWN) {
4579 ap->rxconfig = 0;
4580 ap->link_time = 0;
4581 ap->cur_time = 0;
4582 ap->ability_match_cfg = 0;
4583 ap->ability_match_count = 0;
4584 ap->ability_match = 0;
4585 ap->idle_match = 0;
4586 ap->ack_match = 0;
4587 }
4588 ap->cur_time++;
4589
4590 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4591 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4592
4593 if (rx_cfg_reg != ap->ability_match_cfg) {
4594 ap->ability_match_cfg = rx_cfg_reg;
4595 ap->ability_match = 0;
4596 ap->ability_match_count = 0;
4597 } else {
4598 if (++ap->ability_match_count > 1) {
4599 ap->ability_match = 1;
4600 ap->ability_match_cfg = rx_cfg_reg;
4601 }
4602 }
4603 if (rx_cfg_reg & ANEG_CFG_ACK)
4604 ap->ack_match = 1;
4605 else
4606 ap->ack_match = 0;
4607
4608 ap->idle_match = 0;
4609 } else {
4610 ap->idle_match = 1;
4611 ap->ability_match_cfg = 0;
4612 ap->ability_match_count = 0;
4613 ap->ability_match = 0;
4614 ap->ack_match = 0;
4615
4616 rx_cfg_reg = 0;
4617 }
4618
4619 ap->rxconfig = rx_cfg_reg;
4620 ret = ANEG_OK;
4621
33f401ae 4622 switch (ap->state) {
1da177e4
LT
4623 case ANEG_STATE_UNKNOWN:
4624 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4625 ap->state = ANEG_STATE_AN_ENABLE;
4626
4627 /* fallthru */
4628 case ANEG_STATE_AN_ENABLE:
4629 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4630 if (ap->flags & MR_AN_ENABLE) {
4631 ap->link_time = 0;
4632 ap->cur_time = 0;
4633 ap->ability_match_cfg = 0;
4634 ap->ability_match_count = 0;
4635 ap->ability_match = 0;
4636 ap->idle_match = 0;
4637 ap->ack_match = 0;
4638
4639 ap->state = ANEG_STATE_RESTART_INIT;
4640 } else {
4641 ap->state = ANEG_STATE_DISABLE_LINK_OK;
4642 }
4643 break;
4644
4645 case ANEG_STATE_RESTART_INIT:
4646 ap->link_time = ap->cur_time;
4647 ap->flags &= ~(MR_NP_LOADED);
4648 ap->txconfig = 0;
4649 tw32(MAC_TX_AUTO_NEG, 0);
4650 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4651 tw32_f(MAC_MODE, tp->mac_mode);
4652 udelay(40);
4653
4654 ret = ANEG_TIMER_ENAB;
4655 ap->state = ANEG_STATE_RESTART;
4656
4657 /* fallthru */
4658 case ANEG_STATE_RESTART:
4659 delta = ap->cur_time - ap->link_time;
859a5887 4660 if (delta > ANEG_STATE_SETTLE_TIME)
1da177e4 4661 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
859a5887 4662 else
1da177e4 4663 ret = ANEG_TIMER_ENAB;
1da177e4
LT
4664 break;
4665
4666 case ANEG_STATE_DISABLE_LINK_OK:
4667 ret = ANEG_DONE;
4668 break;
4669
4670 case ANEG_STATE_ABILITY_DETECT_INIT:
4671 ap->flags &= ~(MR_TOGGLE_TX);
5be73b47
MC
4672 ap->txconfig = ANEG_CFG_FD;
4673 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4674 if (flowctrl & ADVERTISE_1000XPAUSE)
4675 ap->txconfig |= ANEG_CFG_PS1;
4676 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4677 ap->txconfig |= ANEG_CFG_PS2;
1da177e4
LT
4678 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4679 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4680 tw32_f(MAC_MODE, tp->mac_mode);
4681 udelay(40);
4682
4683 ap->state = ANEG_STATE_ABILITY_DETECT;
4684 break;
4685
4686 case ANEG_STATE_ABILITY_DETECT:
859a5887 4687 if (ap->ability_match != 0 && ap->rxconfig != 0)
1da177e4 4688 ap->state = ANEG_STATE_ACK_DETECT_INIT;
1da177e4
LT
4689 break;
4690
4691 case ANEG_STATE_ACK_DETECT_INIT:
4692 ap->txconfig |= ANEG_CFG_ACK;
4693 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4694 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4695 tw32_f(MAC_MODE, tp->mac_mode);
4696 udelay(40);
4697
4698 ap->state = ANEG_STATE_ACK_DETECT;
4699
4700 /* fallthru */
4701 case ANEG_STATE_ACK_DETECT:
4702 if (ap->ack_match != 0) {
4703 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4704 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4705 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4706 } else {
4707 ap->state = ANEG_STATE_AN_ENABLE;
4708 }
4709 } else if (ap->ability_match != 0 &&
4710 ap->rxconfig == 0) {
4711 ap->state = ANEG_STATE_AN_ENABLE;
4712 }
4713 break;
4714
4715 case ANEG_STATE_COMPLETE_ACK_INIT:
4716 if (ap->rxconfig & ANEG_CFG_INVAL) {
4717 ret = ANEG_FAILED;
4718 break;
4719 }
4720 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4721 MR_LP_ADV_HALF_DUPLEX |
4722 MR_LP_ADV_SYM_PAUSE |
4723 MR_LP_ADV_ASYM_PAUSE |
4724 MR_LP_ADV_REMOTE_FAULT1 |
4725 MR_LP_ADV_REMOTE_FAULT2 |
4726 MR_LP_ADV_NEXT_PAGE |
4727 MR_TOGGLE_RX |
4728 MR_NP_RX);
4729 if (ap->rxconfig & ANEG_CFG_FD)
4730 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4731 if (ap->rxconfig & ANEG_CFG_HD)
4732 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4733 if (ap->rxconfig & ANEG_CFG_PS1)
4734 ap->flags |= MR_LP_ADV_SYM_PAUSE;
4735 if (ap->rxconfig & ANEG_CFG_PS2)
4736 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4737 if (ap->rxconfig & ANEG_CFG_RF1)
4738 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4739 if (ap->rxconfig & ANEG_CFG_RF2)
4740 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4741 if (ap->rxconfig & ANEG_CFG_NP)
4742 ap->flags |= MR_LP_ADV_NEXT_PAGE;
4743
4744 ap->link_time = ap->cur_time;
4745
4746 ap->flags ^= (MR_TOGGLE_TX);
4747 if (ap->rxconfig & 0x0008)
4748 ap->flags |= MR_TOGGLE_RX;
4749 if (ap->rxconfig & ANEG_CFG_NP)
4750 ap->flags |= MR_NP_RX;
4751 ap->flags |= MR_PAGE_RX;
4752
4753 ap->state = ANEG_STATE_COMPLETE_ACK;
4754 ret = ANEG_TIMER_ENAB;
4755 break;
4756
4757 case ANEG_STATE_COMPLETE_ACK:
4758 if (ap->ability_match != 0 &&
4759 ap->rxconfig == 0) {
4760 ap->state = ANEG_STATE_AN_ENABLE;
4761 break;
4762 }
4763 delta = ap->cur_time - ap->link_time;
4764 if (delta > ANEG_STATE_SETTLE_TIME) {
4765 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4766 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4767 } else {
4768 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4769 !(ap->flags & MR_NP_RX)) {
4770 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4771 } else {
4772 ret = ANEG_FAILED;
4773 }
4774 }
4775 }
4776 break;
4777
4778 case ANEG_STATE_IDLE_DETECT_INIT:
4779 ap->link_time = ap->cur_time;
4780 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4781 tw32_f(MAC_MODE, tp->mac_mode);
4782 udelay(40);
4783
4784 ap->state = ANEG_STATE_IDLE_DETECT;
4785 ret = ANEG_TIMER_ENAB;
4786 break;
4787
4788 case ANEG_STATE_IDLE_DETECT:
4789 if (ap->ability_match != 0 &&
4790 ap->rxconfig == 0) {
4791 ap->state = ANEG_STATE_AN_ENABLE;
4792 break;
4793 }
4794 delta = ap->cur_time - ap->link_time;
4795 if (delta > ANEG_STATE_SETTLE_TIME) {
4796 /* XXX another gem from the Broadcom driver :( */
4797 ap->state = ANEG_STATE_LINK_OK;
4798 }
4799 break;
4800
4801 case ANEG_STATE_LINK_OK:
4802 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4803 ret = ANEG_DONE;
4804 break;
4805
4806 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4807 /* ??? unimplemented */
4808 break;
4809
4810 case ANEG_STATE_NEXT_PAGE_WAIT:
4811 /* ??? unimplemented */
4812 break;
4813
4814 default:
4815 ret = ANEG_FAILED;
4816 break;
855e1111 4817 }
1da177e4
LT
4818
4819 return ret;
4820}
4821
5be73b47 4822static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
1da177e4
LT
4823{
4824 int res = 0;
4825 struct tg3_fiber_aneginfo aninfo;
4826 int status = ANEG_FAILED;
4827 unsigned int tick;
4828 u32 tmp;
4829
4830 tw32_f(MAC_TX_AUTO_NEG, 0);
4831
4832 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4833 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4834 udelay(40);
4835
4836 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4837 udelay(40);
4838
4839 memset(&aninfo, 0, sizeof(aninfo));
4840 aninfo.flags |= MR_AN_ENABLE;
4841 aninfo.state = ANEG_STATE_UNKNOWN;
4842 aninfo.cur_time = 0;
4843 tick = 0;
4844 while (++tick < 195000) {
4845 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4846 if (status == ANEG_DONE || status == ANEG_FAILED)
4847 break;
4848
4849 udelay(1);
4850 }
4851
4852 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4853 tw32_f(MAC_MODE, tp->mac_mode);
4854 udelay(40);
4855
5be73b47
MC
4856 *txflags = aninfo.txconfig;
4857 *rxflags = aninfo.flags;
1da177e4
LT
4858
4859 if (status == ANEG_DONE &&
4860 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4861 MR_LP_ADV_FULL_DUPLEX)))
4862 res = 1;
4863
4864 return res;
4865}
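/* The polling budget above: up to 195000 passes with udelay(1)
 * between them, i.e. roughly 195 ms for the state machine to reach
 * ANEG_DONE or ANEG_FAILED before the caller gives up.
 */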
4866
4867static void tg3_init_bcm8002(struct tg3 *tp)
4868{
4869 u32 mac_status = tr32(MAC_STATUS);
4870 int i;
4871
4872 /* Reset when initializing for the first time, or when we have a link. */
63c3a66f 4873 if (tg3_flag(tp, INIT_COMPLETE) &&
1da177e4
LT
4874 !(mac_status & MAC_STATUS_PCS_SYNCED))
4875 return;
4876
4877 /* Set PLL lock range. */
4878 tg3_writephy(tp, 0x16, 0x8007);
4879
4880 /* SW reset */
4881 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4882
4883 /* Wait for reset to complete. */
4884 /* XXX schedule_timeout() ... */
4885 for (i = 0; i < 500; i++)
4886 udelay(10);
4887
4888 /* Config mode; select PMA/Ch 1 regs. */
4889 tg3_writephy(tp, 0x10, 0x8411);
4890
4891 /* Enable auto-lock and comdet, select txclk for tx. */
4892 tg3_writephy(tp, 0x11, 0x0a10);
4893
4894 tg3_writephy(tp, 0x18, 0x00a0);
4895 tg3_writephy(tp, 0x16, 0x41ff);
4896
4897 /* Assert and deassert POR. */
4898 tg3_writephy(tp, 0x13, 0x0400);
4899 udelay(40);
4900 tg3_writephy(tp, 0x13, 0x0000);
4901
4902 tg3_writephy(tp, 0x11, 0x0a50);
4903 udelay(40);
4904 tg3_writephy(tp, 0x11, 0x0a10);
4905
4906 /* Wait for signal to stabilize */
4907 /* XXX schedule_timeout() ... */
4908 for (i = 0; i < 15000; i++)
4909 udelay(10);
4910
4911 /* Deselect the channel register so we can read the PHYID
4912 * later.
4913 */
4914 tg3_writephy(tp, 0x10, 0x8011);
4915}
4916
4917static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4918{
82cd3d11 4919 u16 flowctrl;
1da177e4
LT
4920 u32 sg_dig_ctrl, sg_dig_status;
4921 u32 serdes_cfg, expected_sg_dig_ctrl;
4922 int workaround, port_a;
4923 int current_link_up;
4924
4925 serdes_cfg = 0;
4926 expected_sg_dig_ctrl = 0;
4927 workaround = 0;
4928 port_a = 1;
4929 current_link_up = 0;
4930
4931 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4932 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4933 workaround = 1;
4934 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4935 port_a = 0;
4936
4937 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4938 /* preserve bits 20-23 for voltage regulator */
4939 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4940 }
4941
4942 sg_dig_ctrl = tr32(SG_DIG_CTRL);
4943
4944 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
c98f6e3b 4945 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
1da177e4
LT
4946 if (workaround) {
4947 u32 val = serdes_cfg;
4948
4949 if (port_a)
4950 val |= 0xc010000;
4951 else
4952 val |= 0x4010000;
4953 tw32_f(MAC_SERDES_CFG, val);
4954 }
c98f6e3b
MC
4955
4956 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
1da177e4
LT
4957 }
4958 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4959 tg3_setup_flow_control(tp, 0, 0);
4960 current_link_up = 1;
4961 }
4962 goto out;
4963 }
4964
4965 /* Want auto-negotiation. */
c98f6e3b 4966 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
1da177e4 4967
82cd3d11
MC
4968 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4969 if (flowctrl & ADVERTISE_1000XPAUSE)
4970 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4971 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4972 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
1da177e4
LT
4973
4974 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
f07e9af3 4975 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3d3ebe74
MC
4976 tp->serdes_counter &&
4977 ((mac_status & (MAC_STATUS_PCS_SYNCED |
4978 MAC_STATUS_RCVD_CFG)) ==
4979 MAC_STATUS_PCS_SYNCED)) {
4980 tp->serdes_counter--;
4981 current_link_up = 1;
4982 goto out;
4983 }
4984restart_autoneg:
1da177e4
LT
4985 if (workaround)
4986 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
c98f6e3b 4987 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
1da177e4
LT
4988 udelay(5);
4989 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4990
3d3ebe74 4991 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
f07e9af3 4992 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
1da177e4
LT
4993 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4994 MAC_STATUS_SIGNAL_DET)) {
3d3ebe74 4995 sg_dig_status = tr32(SG_DIG_STATUS);
1da177e4
LT
4996 mac_status = tr32(MAC_STATUS);
4997
c98f6e3b 4998 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
1da177e4 4999 (mac_status & MAC_STATUS_PCS_SYNCED)) {
82cd3d11
MC
5000 u32 local_adv = 0, remote_adv = 0;
5001
5002 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5003 local_adv |= ADVERTISE_1000XPAUSE;
5004 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5005 local_adv |= ADVERTISE_1000XPSE_ASYM;
1da177e4 5006
c98f6e3b 5007 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
82cd3d11 5008 remote_adv |= LPA_1000XPAUSE;
c98f6e3b 5009 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
82cd3d11 5010 remote_adv |= LPA_1000XPAUSE_ASYM;
1da177e4 5011
859edb26
MC
5012 tp->link_config.rmt_adv =
5013 mii_adv_to_ethtool_adv_x(remote_adv);
5014
1da177e4
LT
5015 tg3_setup_flow_control(tp, local_adv, remote_adv);
5016 current_link_up = 1;
3d3ebe74 5017 tp->serdes_counter = 0;
f07e9af3 5018 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
c98f6e3b 5019 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3d3ebe74
MC
5020 if (tp->serdes_counter)
5021 tp->serdes_counter--;
1da177e4
LT
5022 else {
5023 if (workaround) {
5024 u32 val = serdes_cfg;
5025
5026 if (port_a)
5027 val |= 0xc010000;
5028 else
5029 val |= 0x4010000;
5030
5031 tw32_f(MAC_SERDES_CFG, val);
5032 }
5033
c98f6e3b 5034 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
1da177e4
LT
5035 udelay(40);
5036
5037 /* Link parallel detection - link is up
5038 * only if we have PCS_SYNC and are not
5039 * receiving config code words. */
5040 mac_status = tr32(MAC_STATUS);
5041 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5042 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5043 tg3_setup_flow_control(tp, 0, 0);
5044 current_link_up = 1;
f07e9af3
MC
5045 tp->phy_flags |=
5046 TG3_PHYFLG_PARALLEL_DETECT;
3d3ebe74
MC
5047 tp->serdes_counter =
5048 SERDES_PARALLEL_DET_TIMEOUT;
5049 } else
5050 goto restart_autoneg;
1da177e4
LT
5051 }
5052 }
3d3ebe74
MC
5053 } else {
5054 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
f07e9af3 5055 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
1da177e4
LT
5056 }
5057
5058out:
5059 return current_link_up;
5060}
5061
5062static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5063{
5064 int current_link_up = 0;
5065
5cf64b8a 5066 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
1da177e4 5067 goto out;
1da177e4
LT
5068
5069 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5be73b47 5070 u32 txflags, rxflags;
1da177e4 5071 int i;
6aa20a22 5072
5be73b47
MC
5073 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5074 u32 local_adv = 0, remote_adv = 0;
1da177e4 5075
5be73b47
MC
5076 if (txflags & ANEG_CFG_PS1)
5077 local_adv |= ADVERTISE_1000XPAUSE;
5078 if (txflags & ANEG_CFG_PS2)
5079 local_adv |= ADVERTISE_1000XPSE_ASYM;
5080
5081 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5082 remote_adv |= LPA_1000XPAUSE;
5083 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5084 remote_adv |= LPA_1000XPAUSE_ASYM;
1da177e4 5085
859edb26
MC
5086 tp->link_config.rmt_adv =
5087 mii_adv_to_ethtool_adv_x(remote_adv);
5088
1da177e4
LT
5089 tg3_setup_flow_control(tp, local_adv, remote_adv);
5090
1da177e4
LT
5091 current_link_up = 1;
5092 }
5093 for (i = 0; i < 30; i++) {
5094 udelay(20);
5095 tw32_f(MAC_STATUS,
5096 (MAC_STATUS_SYNC_CHANGED |
5097 MAC_STATUS_CFG_CHANGED));
5098 udelay(40);
5099 if ((tr32(MAC_STATUS) &
5100 (MAC_STATUS_SYNC_CHANGED |
5101 MAC_STATUS_CFG_CHANGED)) == 0)
5102 break;
5103 }
5104
5105 mac_status = tr32(MAC_STATUS);
5106 if (current_link_up == 0 &&
5107 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5108 !(mac_status & MAC_STATUS_RCVD_CFG))
5109 current_link_up = 1;
5110 } else {
5be73b47
MC
5111 tg3_setup_flow_control(tp, 0, 0);
5112
1da177e4
LT
5113 /* Forcing 1000FD link up. */
5114 current_link_up = 1;
1da177e4
LT
5115
5116 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5117 udelay(40);
e8f3f6ca
MC
5118
5119 tw32_f(MAC_MODE, tp->mac_mode);
5120 udelay(40);
1da177e4
LT
5121 }
5122
5123out:
5124 return current_link_up;
5125}
5126
5127static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5128{
5129 u32 orig_pause_cfg;
5130 u16 orig_active_speed;
5131 u8 orig_active_duplex;
5132 u32 mac_status;
5133 int current_link_up;
5134 int i;
5135
8d018621 5136 orig_pause_cfg = tp->link_config.active_flowctrl;
1da177e4
LT
5137 orig_active_speed = tp->link_config.active_speed;
5138 orig_active_duplex = tp->link_config.active_duplex;
5139
63c3a66f 5140 if (!tg3_flag(tp, HW_AUTONEG) &&
f4a46d1f 5141 tp->link_up &&
63c3a66f 5142 tg3_flag(tp, INIT_COMPLETE)) {
1da177e4
LT
5143 mac_status = tr32(MAC_STATUS);
5144 mac_status &= (MAC_STATUS_PCS_SYNCED |
5145 MAC_STATUS_SIGNAL_DET |
5146 MAC_STATUS_CFG_CHANGED |
5147 MAC_STATUS_RCVD_CFG);
5148 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5149 MAC_STATUS_SIGNAL_DET)) {
5150 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5151 MAC_STATUS_CFG_CHANGED));
5152 return 0;
5153 }
5154 }
5155
5156 tw32_f(MAC_TX_AUTO_NEG, 0);
5157
5158 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5159 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5160 tw32_f(MAC_MODE, tp->mac_mode);
5161 udelay(40);
5162
79eb6904 5163 if (tp->phy_id == TG3_PHY_ID_BCM8002)
1da177e4
LT
5164 tg3_init_bcm8002(tp);
5165
5166 /* Enable link change event even when serdes polling. */
5167 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5168 udelay(40);
5169
5170 current_link_up = 0;
859edb26 5171 tp->link_config.rmt_adv = 0;
1da177e4
LT
5172 mac_status = tr32(MAC_STATUS);
5173
63c3a66f 5174 if (tg3_flag(tp, HW_AUTONEG))
1da177e4
LT
5175 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5176 else
5177 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5178
898a56f8 5179 tp->napi[0].hw_status->status =
1da177e4 5180 (SD_STATUS_UPDATED |
898a56f8 5181 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
1da177e4
LT
5182
5183 for (i = 0; i < 100; i++) {
5184 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5185 MAC_STATUS_CFG_CHANGED));
5186 udelay(5);
5187 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3d3ebe74
MC
5188 MAC_STATUS_CFG_CHANGED |
5189 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
1da177e4
LT
5190 break;
5191 }
5192
5193 mac_status = tr32(MAC_STATUS);
5194 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5195 current_link_up = 0;
3d3ebe74
MC
5196 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5197 tp->serdes_counter == 0) {
1da177e4
LT
5198 tw32_f(MAC_MODE, (tp->mac_mode |
5199 MAC_MODE_SEND_CONFIGS));
5200 udelay(1);
5201 tw32_f(MAC_MODE, tp->mac_mode);
5202 }
5203 }
5204
5205 if (current_link_up == 1) {
5206 tp->link_config.active_speed = SPEED_1000;
5207 tp->link_config.active_duplex = DUPLEX_FULL;
5208 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5209 LED_CTRL_LNKLED_OVERRIDE |
5210 LED_CTRL_1000MBPS_ON));
5211 } else {
e740522e
MC
5212 tp->link_config.active_speed = SPEED_UNKNOWN;
5213 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
1da177e4
LT
5214 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5215 LED_CTRL_LNKLED_OVERRIDE |
5216 LED_CTRL_TRAFFIC_OVERRIDE));
5217 }
5218
f4a46d1f 5219 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
8d018621 5220 u32 now_pause_cfg = tp->link_config.active_flowctrl;
1da177e4
LT
5221 if (orig_pause_cfg != now_pause_cfg ||
5222 orig_active_speed != tp->link_config.active_speed ||
5223 orig_active_duplex != tp->link_config.active_duplex)
5224 tg3_link_report(tp);
5225 }
5226
5227 return 0;
5228}
5229
747e8f8b
MC
5230static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5231{
5232 int current_link_up, err = 0;
5233 u32 bmsr, bmcr;
5234 u16 current_speed;
5235 u8 current_duplex;
ef167e27 5236 u32 local_adv, remote_adv;
747e8f8b
MC
5237
5238 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5239 tw32_f(MAC_MODE, tp->mac_mode);
5240 udelay(40);
5241
5242 tw32(MAC_EVENT, 0);
5243
5244 tw32_f(MAC_STATUS,
5245 (MAC_STATUS_SYNC_CHANGED |
5246 MAC_STATUS_CFG_CHANGED |
5247 MAC_STATUS_MI_COMPLETION |
5248 MAC_STATUS_LNKSTATE_CHANGED));
5249 udelay(40);
5250
5251 if (force_reset)
5252 tg3_phy_reset(tp);
5253
5254 current_link_up = 0;
e740522e
MC
5255 current_speed = SPEED_UNKNOWN;
5256 current_duplex = DUPLEX_UNKNOWN;
859edb26 5257 tp->link_config.rmt_adv = 0;
747e8f8b
MC
5258
5259 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5260 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
d4d2c558
MC
5261 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
5262 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5263 bmsr |= BMSR_LSTATUS;
5264 else
5265 bmsr &= ~BMSR_LSTATUS;
5266 }
747e8f8b
MC
5267
5268 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5269
5270 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
f07e9af3 5271 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
747e8f8b
MC
5272 /* do nothing, just check for link up at the end */
5273 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
28011cf1 5274 u32 adv, newadv;
747e8f8b
MC
5275
5276 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
28011cf1
MC
5277 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5278 ADVERTISE_1000XPAUSE |
5279 ADVERTISE_1000XPSE_ASYM |
5280 ADVERTISE_SLCT);
747e8f8b 5281
28011cf1 5282 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
37f07023 5283 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
747e8f8b 5284
28011cf1
MC
5285 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5286 tg3_writephy(tp, MII_ADVERTISE, newadv);
747e8f8b
MC
5287 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5288 tg3_writephy(tp, MII_BMCR, bmcr);
5289
5290 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3d3ebe74 5291 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
f07e9af3 5292 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
747e8f8b
MC
5293
5294 return err;
5295 }
5296 } else {
5297 u32 new_bmcr;
5298
5299 bmcr &= ~BMCR_SPEED1000;
5300 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5301
5302 if (tp->link_config.duplex == DUPLEX_FULL)
5303 new_bmcr |= BMCR_FULLDPLX;
5304
5305 if (new_bmcr != bmcr) {
5306 /* BMCR_SPEED1000 is a reserved bit that needs
5307 * to be set on write.
5308 */
5309 new_bmcr |= BMCR_SPEED1000;
5310
5311 /* Force a link down */
f4a46d1f 5312 if (tp->link_up) {
747e8f8b
MC
5313 u32 adv;
5314
5315 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5316 adv &= ~(ADVERTISE_1000XFULL |
5317 ADVERTISE_1000XHALF |
5318 ADVERTISE_SLCT);
5319 tg3_writephy(tp, MII_ADVERTISE, adv);
5320 tg3_writephy(tp, MII_BMCR, bmcr |
5321 BMCR_ANRESTART |
5322 BMCR_ANENABLE);
5323 udelay(10);
f4a46d1f 5324 tg3_carrier_off(tp);
747e8f8b
MC
5325 }
5326 tg3_writephy(tp, MII_BMCR, new_bmcr);
5327 bmcr = new_bmcr;
5328 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5329 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
d4d2c558
MC
5330 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5331 ASIC_REV_5714) {
5332 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5333 bmsr |= BMSR_LSTATUS;
5334 else
5335 bmsr &= ~BMSR_LSTATUS;
5336 }
f07e9af3 5337 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
747e8f8b
MC
5338 }
5339 }
5340
5341 if (bmsr & BMSR_LSTATUS) {
5342 current_speed = SPEED_1000;
5343 current_link_up = 1;
5344 if (bmcr & BMCR_FULLDPLX)
5345 current_duplex = DUPLEX_FULL;
5346 else
5347 current_duplex = DUPLEX_HALF;
5348
ef167e27
MC
5349 local_adv = 0;
5350 remote_adv = 0;
5351
747e8f8b 5352 if (bmcr & BMCR_ANENABLE) {
ef167e27 5353 u32 common;
747e8f8b
MC
5354
5355 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5356 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5357 common = local_adv & remote_adv;
5358 if (common & (ADVERTISE_1000XHALF |
5359 ADVERTISE_1000XFULL)) {
5360 if (common & ADVERTISE_1000XFULL)
5361 current_duplex = DUPLEX_FULL;
5362 else
5363 current_duplex = DUPLEX_HALF;
859edb26
MC
5364
5365 tp->link_config.rmt_adv =
5366 mii_adv_to_ethtool_adv_x(remote_adv);
63c3a66f 5367 } else if (!tg3_flag(tp, 5780_CLASS)) {
57d8b880 5368 /* Link is up via parallel detect */
859a5887 5369 } else {
747e8f8b 5370 current_link_up = 0;
859a5887 5371 }
747e8f8b
MC
5372 }
5373 }
5374
ef167e27
MC
5375 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5376 tg3_setup_flow_control(tp, local_adv, remote_adv);
5377
747e8f8b
MC
5378 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5379 if (tp->link_config.active_duplex == DUPLEX_HALF)
5380 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5381
5382 tw32_f(MAC_MODE, tp->mac_mode);
5383 udelay(40);
5384
5385 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5386
5387 tp->link_config.active_speed = current_speed;
5388 tp->link_config.active_duplex = current_duplex;
5389
f4a46d1f 5390 tg3_test_and_report_link_chg(tp, current_link_up);
747e8f8b
MC
5391 return err;
5392}
5393
5394static void tg3_serdes_parallel_detect(struct tg3 *tp)
5395{
3d3ebe74 5396 if (tp->serdes_counter) {
747e8f8b 5397 /* Give autoneg time to complete. */
3d3ebe74 5398 tp->serdes_counter--;
747e8f8b
MC
5399 return;
5400 }
c6cdf436 5401
f4a46d1f 5402 if (!tp->link_up &&
747e8f8b
MC
5403 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5404 u32 bmcr;
5405
5406 tg3_readphy(tp, MII_BMCR, &bmcr);
5407 if (bmcr & BMCR_ANENABLE) {
5408 u32 phy1, phy2;
5409
5410 /* Select shadow register 0x1f */
f08aa1a8
MC
5411 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5412 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
747e8f8b
MC
5413
5414 /* Select expansion interrupt status register */
f08aa1a8
MC
5415 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5416 MII_TG3_DSP_EXP1_INT_STAT);
5417 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5418 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
747e8f8b
MC
5419
5420 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5421 /* We have signal detect and are not receiving
5422 * config code words; the link is up by parallel
5423 * detection.
5424 */
5425
5426 bmcr &= ~BMCR_ANENABLE;
5427 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5428 tg3_writephy(tp, MII_BMCR, bmcr);
f07e9af3 5429 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
747e8f8b
MC
5430 }
5431 }
f4a46d1f 5432 } else if (tp->link_up &&
859a5887 5433 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
f07e9af3 5434 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
747e8f8b
MC
5435 u32 phy2;
5436
5437 /* Select expansion interrupt status register */
f08aa1a8
MC
5438 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5439 MII_TG3_DSP_EXP1_INT_STAT);
5440 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
747e8f8b
MC
5441 if (phy2 & 0x20) {
5442 u32 bmcr;
5443
5444 /* Config code words received, turn on autoneg. */
5445 tg3_readphy(tp, MII_BMCR, &bmcr);
5446 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5447
f07e9af3 5448 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
747e8f8b
MC
5449
5450 }
5451 }
5452}
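/* The two halves of parallel detection above: while autoneg is
 * pending and the link is down, shadow register 0x1f and the
 * expansion interrupt status decide whether signal is present without
 * config code words, in which case the link is forced to 1000/FULL
 * with autoneg off; once config words reappear on an up link,
 * BMCR_ANENABLE is turned back on and TG3_PHYFLG_PARALLEL_DETECT is
 * cleared.
 */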
5453
1da177e4
LT
5454static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5455{
f2096f94 5456 u32 val;
1da177e4
LT
5457 int err;
5458
f07e9af3 5459 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
1da177e4 5460 err = tg3_setup_fiber_phy(tp, force_reset);
f07e9af3 5461 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
747e8f8b 5462 err = tg3_setup_fiber_mii_phy(tp, force_reset);
859a5887 5463 else
1da177e4 5464 err = tg3_setup_copper_phy(tp, force_reset);
1da177e4 5465
bcb37f6c 5466 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
f2096f94 5467 u32 scale;
aa6c91fe
MC
5468
5469 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5470 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5471 scale = 65;
5472 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5473 scale = 6;
5474 else
5475 scale = 12;
5476
5477 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5478 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5479 tw32(GRC_MISC_CFG, val);
5480 }
5481
f2096f94
MC
5482 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5483 (6 << TX_LENGTHS_IPG_SHIFT);
5484 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5485 val |= tr32(MAC_TX_LENGTHS) &
5486 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5487 TX_LENGTHS_CNT_DWN_VAL_MSK);
5488
1da177e4
LT
5489 if (tp->link_config.active_speed == SPEED_1000 &&
5490 tp->link_config.active_duplex == DUPLEX_HALF)
f2096f94
MC
5491 tw32(MAC_TX_LENGTHS, val |
5492 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
1da177e4 5493 else
f2096f94
MC
5494 tw32(MAC_TX_LENGTHS, val |
5495 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
1da177e4 5496
63c3a66f 5497 if (!tg3_flag(tp, 5705_PLUS)) {
f4a46d1f 5498 if (tp->link_up) {
1da177e4 5499 tw32(HOSTCC_STAT_COAL_TICKS,
15f9850d 5500 tp->coal.stats_block_coalesce_usecs);
1da177e4
LT
5501 } else {
5502 tw32(HOSTCC_STAT_COAL_TICKS, 0);
5503 }
5504 }
5505
63c3a66f 5506 if (tg3_flag(tp, ASPM_WORKAROUND)) {
f2096f94 5507 val = tr32(PCIE_PWR_MGMT_THRESH);
f4a46d1f 5508 if (!tp->link_up)
8ed5d97e
MC
5509 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5510 tp->pwrmgmt_thresh;
5511 else
5512 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5513 tw32(PCIE_PWR_MGMT_THRESH, val);
5514 }
5515
1da177e4
LT
5516 return err;
5517}
5518
66cfd1bd
MC
5519static inline int tg3_irq_sync(struct tg3 *tp)
5520{
5521 return tp->irq_sync;
5522}
5523
97bd8e49
MC
5524static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5525{
5526 int i;
5527
5528 dst = (u32 *)((u8 *)dst + off);
5529 for (i = 0; i < len; i += sizeof(u32))
5530 *dst++ = tr32(off + i);
5531}
5532
5533static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5534{
5535 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5536 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5537 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5538 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5539 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5540 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5541 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5542 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5543 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5544 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5545 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5546 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5547 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5548 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5549 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5550 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5551 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5552 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5553 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5554
63c3a66f 5555 if (tg3_flag(tp, SUPPORT_MSIX))
97bd8e49
MC
5556 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5557
5558 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5559 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5560 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5561 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5562 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5563 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5564 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5565 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5566
63c3a66f 5567 if (!tg3_flag(tp, 5705_PLUS)) {
97bd8e49
MC
5568 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5569 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5570 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5571 }
5572
5573 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5574 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5575 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5576 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5577 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5578
63c3a66f 5579 if (tg3_flag(tp, NVRAM))
97bd8e49
MC
5580 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5581}
5582
5583static void tg3_dump_state(struct tg3 *tp)
5584{
5585 int i;
5586 u32 *regs;
5587
5588 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5589 if (!regs) {
5590 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5591 return;
5592 }
5593
63c3a66f 5594 if (tg3_flag(tp, PCI_EXPRESS)) {
97bd8e49
MC
5595 /* Read up to but not including private PCI registers */
5596 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5597 regs[i / sizeof(u32)] = tr32(i);
5598 } else
5599 tg3_dump_legacy_regs(tp, regs);
5600
5601 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5602 if (!regs[i + 0] && !regs[i + 1] &&
5603 !regs[i + 2] && !regs[i + 3])
5604 continue;
5605
5606 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5607 i * 4,
5608 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5609 }
5610
5611 kfree(regs);
5612
5613 for (i = 0; i < tp->irq_cnt; i++) {
5614 struct tg3_napi *tnapi = &tp->napi[i];
5615
5616 /* SW status block */
5617 netdev_err(tp->dev,
5618 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5619 i,
5620 tnapi->hw_status->status,
5621 tnapi->hw_status->status_tag,
5622 tnapi->hw_status->rx_jumbo_consumer,
5623 tnapi->hw_status->rx_consumer,
5624 tnapi->hw_status->rx_mini_consumer,
5625 tnapi->hw_status->idx[0].rx_producer,
5626 tnapi->hw_status->idx[0].tx_consumer);
5627
5628 netdev_err(tp->dev,
5629 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5630 i,
5631 tnapi->last_tag, tnapi->last_irq_tag,
5632 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5633 tnapi->rx_rcb_ptr,
5634 tnapi->prodring.rx_std_prod_idx,
5635 tnapi->prodring.rx_std_cons_idx,
5636 tnapi->prodring.rx_jmb_prod_idx,
5637 tnapi->prodring.rx_jmb_cons_idx);
5638 }
5639}
5640
df3e6548
MC
5641/* This is called whenever we suspect that the system chipset is re-
5642 * ordering the sequence of MMIO to the tx send mailbox. The symptom
5643 * is bogus tx completions. We try to recover by setting the
5644 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5645 * in the workqueue.
5646 */
5647static void tg3_tx_recover(struct tg3 *tp)
5648{
63c3a66f 5649 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
df3e6548
MC
5650 tp->write32_tx_mbox == tg3_write_indirect_mbox);
5651
5129c3a3
MC
5652 netdev_warn(tp->dev,
5653 "The system may be re-ordering memory-mapped I/O "
5654 "cycles to the network device, attempting to recover. "
5655 "Please report the problem to the driver maintainer "
5656 "and include system chipset information.\n");
df3e6548
MC
5657
5658 spin_lock(&tp->lock);
63c3a66f 5659 tg3_flag_set(tp, TX_RECOVERY_PENDING);
df3e6548
MC
5660 spin_unlock(&tp->lock);
5661}
5662
f3f3f27e 5663static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
1b2a7205 5664{
f65aac16
MC
5665 /* Tell compiler to fetch tx indices from memory. */
5666 barrier();
f3f3f27e
MC
5667 return tnapi->tx_pending -
5668 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
1b2a7205
MC
5669}
5670
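/* Illustrative sketch (not part of the driver): how the masked
 * subtraction in tg3_tx_avail() above survives producer-index
 * wraparound. The values are made up and assume the usual
 * 512-entry TX ring (TG3_TX_RING_SIZE):
 *
 *	tx_prod = 5, tx_cons = 510, tx_pending = 100
 *	(5 - 510) & (512 - 1) == 7	descriptors still in flight
 *	100 - 7 == 93			descriptors free to post
 *
 * The unsigned subtraction wraps modulo 2^32 and the mask reduces
 * it modulo the ring size, so no explicit wrap handling is needed.
 */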
1da177e4
LT
5671/* Tigon3 never reports partial packet sends. So we do not
5672 * need special logic to handle SKBs that have not had all
5673 * of their frags sent yet, like SunGEM does.
5674 */
17375d25 5675static void tg3_tx(struct tg3_napi *tnapi)
1da177e4 5676{
17375d25 5677 struct tg3 *tp = tnapi->tp;
898a56f8 5678 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
f3f3f27e 5679 u32 sw_idx = tnapi->tx_cons;
fe5f5787
MC
5680 struct netdev_queue *txq;
5681 int index = tnapi - tp->napi;
298376d3 5682 unsigned int pkts_compl = 0, bytes_compl = 0;
fe5f5787 5683
63c3a66f 5684 if (tg3_flag(tp, ENABLE_TSS))
fe5f5787
MC
5685 index--;
5686
5687 txq = netdev_get_tx_queue(tp->dev, index);
1da177e4
LT
5688
5689 while (sw_idx != hw_idx) {
df8944cf 5690 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
1da177e4 5691 struct sk_buff *skb = ri->skb;
df3e6548
MC
5692 int i, tx_bug = 0;
5693
5694 if (unlikely(skb == NULL)) {
5695 tg3_tx_recover(tp);
5696 return;
5697 }
1da177e4 5698
f4188d8a 5699 pci_unmap_single(tp->pdev,
4e5e4f0d 5700 dma_unmap_addr(ri, mapping),
f4188d8a
AD
5701 skb_headlen(skb),
5702 PCI_DMA_TODEVICE);
1da177e4
LT
5703
5704 ri->skb = NULL;
5705
e01ee14d
MC
5706 while (ri->fragmented) {
5707 ri->fragmented = false;
5708 sw_idx = NEXT_TX(sw_idx);
5709 ri = &tnapi->tx_buffers[sw_idx];
5710 }
5711
1da177e4
LT
5712 sw_idx = NEXT_TX(sw_idx);
5713
5714 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
f3f3f27e 5715 ri = &tnapi->tx_buffers[sw_idx];
df3e6548
MC
5716 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5717 tx_bug = 1;
f4188d8a
AD
5718
5719 pci_unmap_page(tp->pdev,
4e5e4f0d 5720 dma_unmap_addr(ri, mapping),
9e903e08 5721 skb_frag_size(&skb_shinfo(skb)->frags[i]),
f4188d8a 5722 PCI_DMA_TODEVICE);
e01ee14d
MC
5723
5724 while (ri->fragmented) {
5725 ri->fragmented = false;
5726 sw_idx = NEXT_TX(sw_idx);
5727 ri = &tnapi->tx_buffers[sw_idx];
5728 }
5729
1da177e4
LT
5730 sw_idx = NEXT_TX(sw_idx);
5731 }
5732
298376d3
TH
5733 pkts_compl++;
5734 bytes_compl += skb->len;
5735
f47c11ee 5736 dev_kfree_skb(skb);
df3e6548
MC
5737
5738 if (unlikely(tx_bug)) {
5739 tg3_tx_recover(tp);
5740 return;
5741 }
1da177e4
LT
5742 }
5743
5cb917bc 5744 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
298376d3 5745
f3f3f27e 5746 tnapi->tx_cons = sw_idx;
1da177e4 5747
1b2a7205
MC
5748 /* Need to make the tx_cons update visible to tg3_start_xmit()
5749 * before checking for netif_queue_stopped(). Without the
5750 * memory barrier, there is a small possibility that tg3_start_xmit()
5751 * will miss it and cause the queue to be stopped forever.
5752 */
5753 smp_mb();
5754
fe5f5787 5755 if (unlikely(netif_tx_queue_stopped(txq) &&
f3f3f27e 5756 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
fe5f5787
MC
5757 __netif_tx_lock(txq, smp_processor_id());
5758 if (netif_tx_queue_stopped(txq) &&
f3f3f27e 5759 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
fe5f5787
MC
5760 netif_tx_wake_queue(txq);
5761 __netif_tx_unlock(txq);
51b91468 5762 }
1da177e4
LT
5763}
5764
8d4057a9
ED
5765static void tg3_frag_free(bool is_frag, void *data)
5766{
5767 if (is_frag)
5768 put_page(virt_to_head_page(data));
5769 else
5770 kfree(data);
5771}
5772
9205fd9c 5773static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
2b2cdb65 5774{
8d4057a9
ED
5775 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
5776 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5777
9205fd9c 5778 if (!ri->data)
2b2cdb65
MC
5779 return;
5780
4e5e4f0d 5781 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
2b2cdb65 5782 map_sz, PCI_DMA_FROMDEVICE);
a1e8b307 5783 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
9205fd9c 5784 ri->data = NULL;
2b2cdb65
MC
5785}
5786
8d4057a9 5787
1da177e4
LT
 5788 /* Returns size of data buffer allocated or < 0 on error.
5789 *
5790 * We only need to fill in the address because the other members
5791 * of the RX descriptor are invariant, see tg3_init_rings.
5792 *
 5793 * Note the purposeful asymmetry of cpu vs. chip accesses. For
 5794 * posting buffers we only dirty the first cache line of the RX
 5795 * descriptor (containing the address), whereas for the RX status
 5796 * buffers the cpu only reads the last cache line of the RX descriptor
5797 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5798 */
9205fd9c 5799static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
8d4057a9
ED
5800 u32 opaque_key, u32 dest_idx_unmasked,
5801 unsigned int *frag_size)
1da177e4
LT
5802{
5803 struct tg3_rx_buffer_desc *desc;
f94e290e 5804 struct ring_info *map;
9205fd9c 5805 u8 *data;
1da177e4 5806 dma_addr_t mapping;
9205fd9c 5807 int skb_size, data_size, dest_idx;
1da177e4 5808
1da177e4
LT
5809 switch (opaque_key) {
5810 case RXD_OPAQUE_RING_STD:
2c49a44d 5811 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
21f581a5
MC
5812 desc = &tpr->rx_std[dest_idx];
5813 map = &tpr->rx_std_buffers[dest_idx];
9205fd9c 5814 data_size = tp->rx_pkt_map_sz;
1da177e4
LT
5815 break;
5816
5817 case RXD_OPAQUE_RING_JUMBO:
2c49a44d 5818 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
79ed5ac7 5819 desc = &tpr->rx_jmb[dest_idx].std;
21f581a5 5820 map = &tpr->rx_jmb_buffers[dest_idx];
9205fd9c 5821 data_size = TG3_RX_JMB_MAP_SZ;
1da177e4
LT
5822 break;
5823
5824 default:
5825 return -EINVAL;
855e1111 5826 }
1da177e4
LT
5827
5828 /* Do not overwrite any of the map or rp information
5829 * until we are sure we can commit to a new buffer.
5830 *
5831 * Callers depend upon this behavior and assume that
5832 * we leave everything unchanged if we fail.
5833 */
9205fd9c
ED
5834 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
5835 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
a1e8b307
ED
5836 if (skb_size <= PAGE_SIZE) {
5837 data = netdev_alloc_frag(skb_size);
5838 *frag_size = skb_size;
8d4057a9
ED
5839 } else {
5840 data = kmalloc(skb_size, GFP_ATOMIC);
5841 *frag_size = 0;
5842 }
9205fd9c 5843 if (!data)
1da177e4
LT
5844 return -ENOMEM;
5845
9205fd9c
ED
5846 mapping = pci_map_single(tp->pdev,
5847 data + TG3_RX_OFFSET(tp),
5848 data_size,
1da177e4 5849 PCI_DMA_FROMDEVICE);
8d4057a9 5850 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
a1e8b307 5851 tg3_frag_free(skb_size <= PAGE_SIZE, data);
a21771dd
MC
5852 return -EIO;
5853 }
1da177e4 5854
9205fd9c 5855 map->data = data;
4e5e4f0d 5856 dma_unmap_addr_set(map, mapping, mapping);
1da177e4 5857
1da177e4
LT
5858 desc->addr_hi = ((u64)mapping >> 32);
5859 desc->addr_lo = ((u64)mapping & 0xffffffff);
5860
9205fd9c 5861 return data_size;
1da177e4
LT
5862}
5863
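/* Illustrative sketch (not part of the driver): the allocation policy
 * tg3_alloc_rx_data() above uses, pulled out for clarity. Buffers
 * whose rounded-up size fits in one page come from the page-fragment
 * allocator; larger (e.g. jumbo) buffers fall back to kmalloc(), and
 * tg3_frag_free() frees either kind accordingly.
 */
static inline void *example_rx_buf_alloc(unsigned int skb_size,
					 unsigned int *frag_size)
{
	if (skb_size <= PAGE_SIZE) {
		*frag_size = skb_size;	/* freed later via put_page() */
		return netdev_alloc_frag(skb_size);
	}
	*frag_size = 0;			/* freed later via kfree() */
	return kmalloc(skb_size, GFP_ATOMIC);
}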
5864/* We only need to move over in the address because the other
5865 * members of the RX descriptor are invariant. See notes above
9205fd9c 5866 * tg3_alloc_rx_data for full details.
1da177e4 5867 */
a3896167
MC
5868static void tg3_recycle_rx(struct tg3_napi *tnapi,
5869 struct tg3_rx_prodring_set *dpr,
5870 u32 opaque_key, int src_idx,
5871 u32 dest_idx_unmasked)
1da177e4 5872{
17375d25 5873 struct tg3 *tp = tnapi->tp;
1da177e4
LT
5874 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5875 struct ring_info *src_map, *dest_map;
8fea32b9 5876 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
c6cdf436 5877 int dest_idx;
1da177e4
LT
5878
5879 switch (opaque_key) {
5880 case RXD_OPAQUE_RING_STD:
2c49a44d 5881 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
a3896167
MC
5882 dest_desc = &dpr->rx_std[dest_idx];
5883 dest_map = &dpr->rx_std_buffers[dest_idx];
5884 src_desc = &spr->rx_std[src_idx];
5885 src_map = &spr->rx_std_buffers[src_idx];
1da177e4
LT
5886 break;
5887
5888 case RXD_OPAQUE_RING_JUMBO:
2c49a44d 5889 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
a3896167
MC
5890 dest_desc = &dpr->rx_jmb[dest_idx].std;
5891 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5892 src_desc = &spr->rx_jmb[src_idx].std;
5893 src_map = &spr->rx_jmb_buffers[src_idx];
1da177e4
LT
5894 break;
5895
5896 default:
5897 return;
855e1111 5898 }
1da177e4 5899
9205fd9c 5900 dest_map->data = src_map->data;
4e5e4f0d
FT
5901 dma_unmap_addr_set(dest_map, mapping,
5902 dma_unmap_addr(src_map, mapping));
1da177e4
LT
5903 dest_desc->addr_hi = src_desc->addr_hi;
5904 dest_desc->addr_lo = src_desc->addr_lo;
e92967bf
MC
5905
5906 /* Ensure that the update to the skb happens after the physical
5907 * addresses have been transferred to the new BD location.
5908 */
5909 smp_wmb();
5910
9205fd9c 5911 src_map->data = NULL;
1da177e4
LT
5912}
5913
1da177e4
LT
5914/* The RX ring scheme is composed of multiple rings which post fresh
5915 * buffers to the chip, and one special ring the chip uses to report
5916 * status back to the host.
5917 *
5918 * The special ring reports the status of received packets to the
 5919 * host. The chip does not write into the original descriptor that
 5920 * the RX buffer was obtained from. The chip simply takes the original
5921 * descriptor as provided by the host, updates the status and length
5922 * field, then writes this into the next status ring entry.
5923 *
5924 * Each ring the host uses to post buffers to the chip is described
5925 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
5926 * it is first placed into the on-chip ram. When the packet's length
5927 * is known, it walks down the TG3_BDINFO entries to select the ring.
 5928 * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
 5929 * whose MAXLEN covers the new packet's length is chosen.
5930 *
5931 * The "separate ring for rx status" scheme may sound queer, but it makes
5932 * sense from a cache coherency perspective. If only the host writes
5933 * to the buffer post rings, and only the chip writes to the rx status
5934 * rings, then cache lines never move beyond shared-modified state.
5935 * If both the host and chip were to write into the same ring, cache line
5936 * eviction could occur since both entities want it in an exclusive state.
5937 */
17375d25 5938static int tg3_rx(struct tg3_napi *tnapi, int budget)
1da177e4 5939{
17375d25 5940 struct tg3 *tp = tnapi->tp;
f92905de 5941 u32 work_mask, rx_std_posted = 0;
4361935a 5942 u32 std_prod_idx, jmb_prod_idx;
72334482 5943 u32 sw_idx = tnapi->rx_rcb_ptr;
483ba50b 5944 u16 hw_idx;
1da177e4 5945 int received;
8fea32b9 5946 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
1da177e4 5947
8d9d7cfc 5948 hw_idx = *(tnapi->rx_rcb_prod_idx);
1da177e4
LT
5949 /*
5950 * We need to order the read of hw_idx and the read of
5951 * the opaque cookie.
5952 */
5953 rmb();
1da177e4
LT
5954 work_mask = 0;
5955 received = 0;
4361935a
MC
5956 std_prod_idx = tpr->rx_std_prod_idx;
5957 jmb_prod_idx = tpr->rx_jmb_prod_idx;
1da177e4 5958 while (sw_idx != hw_idx && budget > 0) {
afc081f8 5959 struct ring_info *ri;
72334482 5960 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
1da177e4
LT
5961 unsigned int len;
5962 struct sk_buff *skb;
5963 dma_addr_t dma_addr;
5964 u32 opaque_key, desc_idx, *post_ptr;
9205fd9c 5965 u8 *data;
1da177e4
LT
5966
5967 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5968 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5969 if (opaque_key == RXD_OPAQUE_RING_STD) {
8fea32b9 5970 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
4e5e4f0d 5971 dma_addr = dma_unmap_addr(ri, mapping);
9205fd9c 5972 data = ri->data;
4361935a 5973 post_ptr = &std_prod_idx;
f92905de 5974 rx_std_posted++;
1da177e4 5975 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
8fea32b9 5976 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
4e5e4f0d 5977 dma_addr = dma_unmap_addr(ri, mapping);
9205fd9c 5978 data = ri->data;
4361935a 5979 post_ptr = &jmb_prod_idx;
21f581a5 5980 } else
1da177e4 5981 goto next_pkt_nopost;
1da177e4
LT
5982
5983 work_mask |= opaque_key;
5984
5985 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5986 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5987 drop_it:
a3896167 5988 tg3_recycle_rx(tnapi, tpr, opaque_key,
1da177e4
LT
5989 desc_idx, *post_ptr);
5990 drop_it_no_recycle:
5991 /* Other statistics kept track of by card. */
b0057c51 5992 tp->rx_dropped++;
1da177e4
LT
5993 goto next_pkt;
5994 }
5995
9205fd9c 5996 prefetch(data + TG3_RX_OFFSET(tp));
ad829268
MC
5997 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5998 ETH_FCS_LEN;
1da177e4 5999
d2757fc4 6000 if (len > TG3_RX_COPY_THRESH(tp)) {
1da177e4 6001 int skb_size;
8d4057a9 6002 unsigned int frag_size;
1da177e4 6003
9205fd9c 6004 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
8d4057a9 6005 *post_ptr, &frag_size);
1da177e4
LT
6006 if (skb_size < 0)
6007 goto drop_it;
6008
287be12e 6009 pci_unmap_single(tp->pdev, dma_addr, skb_size,
1da177e4
LT
6010 PCI_DMA_FROMDEVICE);
6011
8d4057a9 6012 skb = build_skb(data, frag_size);
9205fd9c 6013 if (!skb) {
8d4057a9 6014 tg3_frag_free(frag_size != 0, data);
9205fd9c
ED
6015 goto drop_it_no_recycle;
6016 }
6017 skb_reserve(skb, TG3_RX_OFFSET(tp));
6018 /* Ensure that the update to the data happens
61e800cf
MC
6019 * after the usage of the old DMA mapping.
6020 */
6021 smp_wmb();
6022
9205fd9c 6023 ri->data = NULL;
61e800cf 6024
1da177e4 6025 } else {
a3896167 6026 tg3_recycle_rx(tnapi, tpr, opaque_key,
1da177e4
LT
6027 desc_idx, *post_ptr);
6028
9205fd9c
ED
6029 skb = netdev_alloc_skb(tp->dev,
6030 len + TG3_RAW_IP_ALIGN);
6031 if (skb == NULL)
1da177e4
LT
6032 goto drop_it_no_recycle;
6033
9205fd9c 6034 skb_reserve(skb, TG3_RAW_IP_ALIGN);
1da177e4 6035 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
9205fd9c
ED
6036 memcpy(skb->data,
6037 data + TG3_RX_OFFSET(tp),
6038 len);
1da177e4 6039 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
1da177e4
LT
6040 }
6041
9205fd9c 6042 skb_put(skb, len);
dc668910 6043 if ((tp->dev->features & NETIF_F_RXCSUM) &&
1da177e4
LT
6044 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6045 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6046 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6047 skb->ip_summed = CHECKSUM_UNNECESSARY;
6048 else
bc8acf2c 6049 skb_checksum_none_assert(skb);
1da177e4
LT
6050
6051 skb->protocol = eth_type_trans(skb, tp->dev);
f7b493e0
MC
6052
6053 if (len > (tp->dev->mtu + ETH_HLEN) &&
6054 skb->protocol != htons(ETH_P_8021Q)) {
6055 dev_kfree_skb(skb);
b0057c51 6056 goto drop_it_no_recycle;
f7b493e0
MC
6057 }
6058
9dc7a113 6059 if (desc->type_flags & RXD_FLAG_VLAN &&
bf933c80
MC
6060 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6061 __vlan_hwaccel_put_tag(skb,
6062 desc->err_vlan & RXD_VLAN_MASK);
9dc7a113 6063
bf933c80 6064 napi_gro_receive(&tnapi->napi, skb);
1da177e4 6065
1da177e4
LT
6066 received++;
6067 budget--;
6068
6069next_pkt:
6070 (*post_ptr)++;
f92905de
MC
6071
6072 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
2c49a44d
MC
6073 tpr->rx_std_prod_idx = std_prod_idx &
6074 tp->rx_std_ring_mask;
86cfe4ff
MC
6075 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6076 tpr->rx_std_prod_idx);
f92905de
MC
6077 work_mask &= ~RXD_OPAQUE_RING_STD;
6078 rx_std_posted = 0;
6079 }
1da177e4 6080next_pkt_nopost:
483ba50b 6081 sw_idx++;
7cb32cf2 6082 sw_idx &= tp->rx_ret_ring_mask;
52f6d697
MC
6083
6084 /* Refresh hw_idx to see if there is new work */
6085 if (sw_idx == hw_idx) {
8d9d7cfc 6086 hw_idx = *(tnapi->rx_rcb_prod_idx);
52f6d697
MC
6087 rmb();
6088 }
1da177e4
LT
6089 }
6090
6091 /* ACK the status ring. */
72334482
MC
6092 tnapi->rx_rcb_ptr = sw_idx;
6093 tw32_rx_mbox(tnapi->consmbox, sw_idx);
1da177e4
LT
6094
6095 /* Refill RX ring(s). */
63c3a66f 6096 if (!tg3_flag(tp, ENABLE_RSS)) {
6541b806
MC
6097 /* Sync BD data before updating mailbox */
6098 wmb();
6099
b196c7e4 6100 if (work_mask & RXD_OPAQUE_RING_STD) {
2c49a44d
MC
6101 tpr->rx_std_prod_idx = std_prod_idx &
6102 tp->rx_std_ring_mask;
b196c7e4
MC
6103 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6104 tpr->rx_std_prod_idx);
6105 }
6106 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
2c49a44d
MC
6107 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6108 tp->rx_jmb_ring_mask;
b196c7e4
MC
6109 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6110 tpr->rx_jmb_prod_idx);
6111 }
6112 mmiowb();
6113 } else if (work_mask) {
6114 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6115 * updated before the producer indices can be updated.
6116 */
6117 smp_wmb();
6118
2c49a44d
MC
6119 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6120 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
b196c7e4 6121
7ae52890
MC
6122 if (tnapi != &tp->napi[1]) {
6123 tp->rx_refill = true;
e4af1af9 6124 napi_schedule(&tp->napi[1].napi);
7ae52890 6125 }
1da177e4 6126 }
1da177e4
LT
6127
6128 return received;
6129}
6130
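/* Illustrative sketch (not part of the driver): the TG3_BDINFO MAXLEN
 * selection described in the comment above tg3_rx(). The 1536-byte
 * threshold is an assumption for the example; the chip simply picks
 * the first posting ring whose MAXLEN covers the received frame.
 */
static inline u32 example_pick_rx_ring(unsigned int frame_len)
{
	if (frame_len <= 1536)		/* fits a standard ring buffer */
		return RXD_OPAQUE_RING_STD;
	return RXD_OPAQUE_RING_JUMBO;	/* needs a jumbo ring buffer */
}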
35f2d7d0 6131static void tg3_poll_link(struct tg3 *tp)
1da177e4 6132{
1da177e4 6133 /* handle link change and other phy events */
63c3a66f 6134 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
35f2d7d0
MC
6135 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6136
1da177e4
LT
6137 if (sblk->status & SD_STATUS_LINK_CHG) {
6138 sblk->status = SD_STATUS_UPDATED |
35f2d7d0 6139 (sblk->status & ~SD_STATUS_LINK_CHG);
f47c11ee 6140 spin_lock(&tp->lock);
63c3a66f 6141 if (tg3_flag(tp, USE_PHYLIB)) {
dd477003
MC
6142 tw32_f(MAC_STATUS,
6143 (MAC_STATUS_SYNC_CHANGED |
6144 MAC_STATUS_CFG_CHANGED |
6145 MAC_STATUS_MI_COMPLETION |
6146 MAC_STATUS_LNKSTATE_CHANGED));
6147 udelay(40);
6148 } else
6149 tg3_setup_phy(tp, 0);
f47c11ee 6150 spin_unlock(&tp->lock);
1da177e4
LT
6151 }
6152 }
35f2d7d0
MC
6153}
6154
f89f38b8
MC
6155static int tg3_rx_prodring_xfer(struct tg3 *tp,
6156 struct tg3_rx_prodring_set *dpr,
6157 struct tg3_rx_prodring_set *spr)
b196c7e4
MC
6158{
6159 u32 si, di, cpycnt, src_prod_idx;
f89f38b8 6160 int i, err = 0;
b196c7e4
MC
6161
6162 while (1) {
6163 src_prod_idx = spr->rx_std_prod_idx;
6164
6165 /* Make sure updates to the rx_std_buffers[] entries and the
6166 * standard producer index are seen in the correct order.
6167 */
6168 smp_rmb();
6169
6170 if (spr->rx_std_cons_idx == src_prod_idx)
6171 break;
6172
6173 if (spr->rx_std_cons_idx < src_prod_idx)
6174 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6175 else
2c49a44d
MC
6176 cpycnt = tp->rx_std_ring_mask + 1 -
6177 spr->rx_std_cons_idx;
b196c7e4 6178
2c49a44d
MC
6179 cpycnt = min(cpycnt,
6180 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
b196c7e4
MC
6181
6182 si = spr->rx_std_cons_idx;
6183 di = dpr->rx_std_prod_idx;
6184
e92967bf 6185 for (i = di; i < di + cpycnt; i++) {
9205fd9c 6186 if (dpr->rx_std_buffers[i].data) {
e92967bf 6187 cpycnt = i - di;
f89f38b8 6188 err = -ENOSPC;
e92967bf
MC
6189 break;
6190 }
6191 }
6192
6193 if (!cpycnt)
6194 break;
6195
6196 /* Ensure that updates to the rx_std_buffers ring and the
6197 * shadowed hardware producer ring from tg3_recycle_skb() are
6198 * ordered correctly WRT the skb check above.
6199 */
6200 smp_rmb();
6201
b196c7e4
MC
6202 memcpy(&dpr->rx_std_buffers[di],
6203 &spr->rx_std_buffers[si],
6204 cpycnt * sizeof(struct ring_info));
6205
6206 for (i = 0; i < cpycnt; i++, di++, si++) {
6207 struct tg3_rx_buffer_desc *sbd, *dbd;
6208 sbd = &spr->rx_std[si];
6209 dbd = &dpr->rx_std[di];
6210 dbd->addr_hi = sbd->addr_hi;
6211 dbd->addr_lo = sbd->addr_lo;
6212 }
6213
2c49a44d
MC
6214 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6215 tp->rx_std_ring_mask;
6216 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6217 tp->rx_std_ring_mask;
b196c7e4
MC
6218 }
6219
6220 while (1) {
6221 src_prod_idx = spr->rx_jmb_prod_idx;
6222
6223 /* Make sure updates to the rx_jmb_buffers[] entries and
6224 * the jumbo producer index are seen in the correct order.
6225 */
6226 smp_rmb();
6227
6228 if (spr->rx_jmb_cons_idx == src_prod_idx)
6229 break;
6230
6231 if (spr->rx_jmb_cons_idx < src_prod_idx)
6232 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6233 else
2c49a44d
MC
6234 cpycnt = tp->rx_jmb_ring_mask + 1 -
6235 spr->rx_jmb_cons_idx;
b196c7e4
MC
6236
6237 cpycnt = min(cpycnt,
2c49a44d 6238 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
b196c7e4
MC
6239
6240 si = spr->rx_jmb_cons_idx;
6241 di = dpr->rx_jmb_prod_idx;
6242
e92967bf 6243 for (i = di; i < di + cpycnt; i++) {
9205fd9c 6244 if (dpr->rx_jmb_buffers[i].data) {
e92967bf 6245 cpycnt = i - di;
f89f38b8 6246 err = -ENOSPC;
e92967bf
MC
6247 break;
6248 }
6249 }
6250
6251 if (!cpycnt)
6252 break;
6253
6254 /* Ensure that updates to the rx_jmb_buffers ring and the
6255 * shadowed hardware producer ring from tg3_recycle_skb() are
6256 * ordered correctly WRT the skb check above.
6257 */
6258 smp_rmb();
6259
b196c7e4
MC
6260 memcpy(&dpr->rx_jmb_buffers[di],
6261 &spr->rx_jmb_buffers[si],
6262 cpycnt * sizeof(struct ring_info));
6263
6264 for (i = 0; i < cpycnt; i++, di++, si++) {
6265 struct tg3_rx_buffer_desc *sbd, *dbd;
6266 sbd = &spr->rx_jmb[si].std;
6267 dbd = &dpr->rx_jmb[di].std;
6268 dbd->addr_hi = sbd->addr_hi;
6269 dbd->addr_lo = sbd->addr_lo;
6270 }
6271
2c49a44d
MC
6272 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6273 tp->rx_jmb_ring_mask;
6274 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6275 tp->rx_jmb_ring_mask;
b196c7e4 6276 }
f89f38b8
MC
6277
6278 return err;
b196c7e4
MC
6279}
6280
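/* Illustrative sketch (not part of the driver): the cpycnt clamping in
 * tg3_rx_prodring_xfer() above, with made-up indices on a 512-entry
 * standard ring (mask 511):
 *
 *	rx_std_cons_idx = 510, src_prod_idx = 5	(producer has wrapped)
 *	cpycnt = 511 + 1 - 510 == 2		copy to the end of the
 *						ring, then loop again
 *
 * The same min() against the destination producer index keeps every
 * memcpy() within one contiguous segment of each ring.
 */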
35f2d7d0
MC
6281static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6282{
6283 struct tg3 *tp = tnapi->tp;
1da177e4
LT
6284
6285 /* run TX completion thread */
f3f3f27e 6286 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
17375d25 6287 tg3_tx(tnapi);
63c3a66f 6288 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
4fd7ab59 6289 return work_done;
1da177e4
LT
6290 }
6291
f891ea16
MC
6292 if (!tnapi->rx_rcb_prod_idx)
6293 return work_done;
6294
1da177e4
LT
6295 /* run RX thread, within the bounds set by NAPI.
6296 * All RX "locking" is done by ensuring outside
bea3348e 6297 * code synchronizes with tg3->napi.poll()
1da177e4 6298 */
8d9d7cfc 6299 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
17375d25 6300 work_done += tg3_rx(tnapi, budget - work_done);
1da177e4 6301
63c3a66f 6302 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
8fea32b9 6303 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
f89f38b8 6304 int i, err = 0;
e4af1af9
MC
6305 u32 std_prod_idx = dpr->rx_std_prod_idx;
6306 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
b196c7e4 6307
7ae52890 6308 tp->rx_refill = false;
9102426a 6309 for (i = 1; i <= tp->rxq_cnt; i++)
f89f38b8 6310 err |= tg3_rx_prodring_xfer(tp, dpr,
8fea32b9 6311 &tp->napi[i].prodring);
b196c7e4
MC
6312
6313 wmb();
6314
e4af1af9
MC
6315 if (std_prod_idx != dpr->rx_std_prod_idx)
6316 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6317 dpr->rx_std_prod_idx);
b196c7e4 6318
e4af1af9
MC
6319 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6320 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6321 dpr->rx_jmb_prod_idx);
b196c7e4
MC
6322
6323 mmiowb();
f89f38b8
MC
6324
6325 if (err)
6326 tw32_f(HOSTCC_MODE, tp->coal_now);
b196c7e4
MC
6327 }
6328
6f535763
DM
6329 return work_done;
6330}
6331
db219973
MC
6332static inline void tg3_reset_task_schedule(struct tg3 *tp)
6333{
6334 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6335 schedule_work(&tp->reset_task);
6336}
6337
6338static inline void tg3_reset_task_cancel(struct tg3 *tp)
6339{
6340 cancel_work_sync(&tp->reset_task);
6341 tg3_flag_clear(tp, RESET_TASK_PENDING);
c7101359 6342 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
db219973
MC
6343}
6344
35f2d7d0
MC
6345static int tg3_poll_msix(struct napi_struct *napi, int budget)
6346{
6347 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6348 struct tg3 *tp = tnapi->tp;
6349 int work_done = 0;
6350 struct tg3_hw_status *sblk = tnapi->hw_status;
6351
6352 while (1) {
6353 work_done = tg3_poll_work(tnapi, work_done, budget);
6354
63c3a66f 6355 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
35f2d7d0
MC
6356 goto tx_recovery;
6357
6358 if (unlikely(work_done >= budget))
6359 break;
6360
c6cdf436 6361 /* tp->last_tag is used in tg3_int_reenable() below
35f2d7d0
MC
6362 * to tell the hw how much work has been processed,
6363 * so we must read it before checking for more work.
6364 */
6365 tnapi->last_tag = sblk->status_tag;
6366 tnapi->last_irq_tag = tnapi->last_tag;
6367 rmb();
6368
6369 /* check for RX/TX work to do */
6d40db7b
MC
6370 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6371 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7ae52890
MC
6372
6373 /* This test here is not race free, but will reduce
6374 * the number of interrupts by looping again.
6375 */
6376 if (tnapi == &tp->napi[1] && tp->rx_refill)
6377 continue;
6378
35f2d7d0
MC
6379 napi_complete(napi);
6380 /* Reenable interrupts. */
6381 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7ae52890
MC
6382
6383 /* This test here is synchronized by napi_schedule()
6384 * and napi_complete() to close the race condition.
6385 */
6386 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6387 tw32(HOSTCC_MODE, tp->coalesce_mode |
6388 HOSTCC_MODE_ENABLE |
6389 tnapi->coal_now);
6390 }
35f2d7d0
MC
6391 mmiowb();
6392 break;
6393 }
6394 }
6395
6396 return work_done;
6397
6398tx_recovery:
6399 /* work_done is guaranteed to be less than budget. */
6400 napi_complete(napi);
db219973 6401 tg3_reset_task_schedule(tp);
35f2d7d0
MC
6402 return work_done;
6403}
6404
e64de4e6
MC
6405static void tg3_process_error(struct tg3 *tp)
6406{
6407 u32 val;
6408 bool real_error = false;
6409
63c3a66f 6410 if (tg3_flag(tp, ERROR_PROCESSED))
e64de4e6
MC
6411 return;
6412
6413 /* Check Flow Attention register */
6414 val = tr32(HOSTCC_FLOW_ATTN);
6415 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6416 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
6417 real_error = true;
6418 }
6419
6420 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6421 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
6422 real_error = true;
6423 }
6424
6425 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6426 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
6427 real_error = true;
6428 }
6429
6430 if (!real_error)
6431 return;
6432
6433 tg3_dump_state(tp);
6434
63c3a66f 6435 tg3_flag_set(tp, ERROR_PROCESSED);
db219973 6436 tg3_reset_task_schedule(tp);
e64de4e6
MC
6437}
6438
6f535763
DM
6439static int tg3_poll(struct napi_struct *napi, int budget)
6440{
8ef0442f
MC
6441 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6442 struct tg3 *tp = tnapi->tp;
6f535763 6443 int work_done = 0;
898a56f8 6444 struct tg3_hw_status *sblk = tnapi->hw_status;
6f535763
DM
6445
6446 while (1) {
e64de4e6
MC
6447 if (sblk->status & SD_STATUS_ERROR)
6448 tg3_process_error(tp);
6449
35f2d7d0
MC
6450 tg3_poll_link(tp);
6451
17375d25 6452 work_done = tg3_poll_work(tnapi, work_done, budget);
6f535763 6453
63c3a66f 6454 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6f535763
DM
6455 goto tx_recovery;
6456
6457 if (unlikely(work_done >= budget))
6458 break;
6459
63c3a66f 6460 if (tg3_flag(tp, TAGGED_STATUS)) {
17375d25 6461 /* tp->last_tag is used in tg3_int_reenable() below
4fd7ab59
MC
6462 * to tell the hw how much work has been processed,
6463 * so we must read it before checking for more work.
6464 */
898a56f8
MC
6465 tnapi->last_tag = sblk->status_tag;
6466 tnapi->last_irq_tag = tnapi->last_tag;
4fd7ab59
MC
6467 rmb();
6468 } else
6469 sblk->status &= ~SD_STATUS_UPDATED;
6f535763 6470
17375d25 6471 if (likely(!tg3_has_work(tnapi))) {
288379f0 6472 napi_complete(napi);
17375d25 6473 tg3_int_reenable(tnapi);
6f535763
DM
6474 break;
6475 }
1da177e4
LT
6476 }
6477
bea3348e 6478 return work_done;
6f535763
DM
6479
6480tx_recovery:
4fd7ab59 6481 /* work_done is guaranteed to be less than budget. */
288379f0 6482 napi_complete(napi);
db219973 6483 tg3_reset_task_schedule(tp);
4fd7ab59 6484 return work_done;
1da177e4
LT
6485}
6486
66cfd1bd
MC
6487static void tg3_napi_disable(struct tg3 *tp)
6488{
6489 int i;
6490
6491 for (i = tp->irq_cnt - 1; i >= 0; i--)
6492 napi_disable(&tp->napi[i].napi);
6493}
6494
6495static void tg3_napi_enable(struct tg3 *tp)
6496{
6497 int i;
6498
6499 for (i = 0; i < tp->irq_cnt; i++)
6500 napi_enable(&tp->napi[i].napi);
6501}
6502
6503static void tg3_napi_init(struct tg3 *tp)
6504{
6505 int i;
6506
6507 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6508 for (i = 1; i < tp->irq_cnt; i++)
6509 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6510}
6511
6512static void tg3_napi_fini(struct tg3 *tp)
6513{
6514 int i;
6515
6516 for (i = 0; i < tp->irq_cnt; i++)
6517 netif_napi_del(&tp->napi[i].napi);
6518}
6519
6520static inline void tg3_netif_stop(struct tg3 *tp)
6521{
6522 tp->dev->trans_start = jiffies; /* prevent tx timeout */
6523 tg3_napi_disable(tp);
f4a46d1f 6524 netif_carrier_off(tp->dev);
66cfd1bd
MC
6525 netif_tx_disable(tp->dev);
6526}
6527
6528static inline void tg3_netif_start(struct tg3 *tp)
6529{
6530 /* NOTE: unconditional netif_tx_wake_all_queues is only
6531 * appropriate so long as all callers are assured to
6532 * have free tx slots (such as after tg3_init_hw)
6533 */
6534 netif_tx_wake_all_queues(tp->dev);
6535
f4a46d1f
NNS
6536 if (tp->link_up)
6537 netif_carrier_on(tp->dev);
6538
66cfd1bd
MC
6539 tg3_napi_enable(tp);
6540 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6541 tg3_enable_ints(tp);
6542}
6543
f47c11ee
DM
6544static void tg3_irq_quiesce(struct tg3 *tp)
6545{
4f125f42
MC
6546 int i;
6547
f47c11ee
DM
6548 BUG_ON(tp->irq_sync);
6549
6550 tp->irq_sync = 1;
6551 smp_mb();
6552
4f125f42
MC
6553 for (i = 0; i < tp->irq_cnt; i++)
6554 synchronize_irq(tp->napi[i].irq_vec);
f47c11ee
DM
6555}
6556
f47c11ee
DM
6557/* Fully shutdown all tg3 driver activity elsewhere in the system.
6558 * If irq_sync is non-zero, then the IRQ handler must be synchronized
6559 * with as well. Most of the time, this is not necessary except when
6560 * shutting down the device.
6561 */
6562static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6563{
46966545 6564 spin_lock_bh(&tp->lock);
f47c11ee
DM
6565 if (irq_sync)
6566 tg3_irq_quiesce(tp);
f47c11ee
DM
6567}
6568
6569static inline void tg3_full_unlock(struct tg3 *tp)
6570{
f47c11ee
DM
6571 spin_unlock_bh(&tp->lock);
6572}
6573
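/* Illustrative usage sketch (not part of the driver): a typical
 * quiesce/reconfigure/resume sequence built on the helpers above.
 * The body of the critical section is a placeholder.
 */
static void example_reconfigure(struct tg3 *tp)
{
	tg3_full_lock(tp, 1);	/* irq_sync != 0 also waits for ISRs */
	/* ... safely reprogram the device here ... */
	tg3_full_unlock(tp);
}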
fcfa0a32
MC
6574/* One-shot MSI handler - Chip automatically disables interrupt
6575 * after sending MSI so driver doesn't have to do it.
6576 */
7d12e780 6577static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
fcfa0a32 6578{
09943a18
MC
6579 struct tg3_napi *tnapi = dev_id;
6580 struct tg3 *tp = tnapi->tp;
fcfa0a32 6581
898a56f8 6582 prefetch(tnapi->hw_status);
0c1d0e2b
MC
6583 if (tnapi->rx_rcb)
6584 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
fcfa0a32
MC
6585
6586 if (likely(!tg3_irq_sync(tp)))
09943a18 6587 napi_schedule(&tnapi->napi);
fcfa0a32
MC
6588
6589 return IRQ_HANDLED;
6590}
6591
88b06bc2
MC
6592/* MSI ISR - No need to check for interrupt sharing and no need to
6593 * flush status block and interrupt mailbox. PCI ordering rules
6594 * guarantee that MSI will arrive after the status block.
6595 */
7d12e780 6596static irqreturn_t tg3_msi(int irq, void *dev_id)
88b06bc2 6597{
09943a18
MC
6598 struct tg3_napi *tnapi = dev_id;
6599 struct tg3 *tp = tnapi->tp;
88b06bc2 6600
898a56f8 6601 prefetch(tnapi->hw_status);
0c1d0e2b
MC
6602 if (tnapi->rx_rcb)
6603 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
88b06bc2 6604 /*
fac9b83e 6605 * Writing any value to intr-mbox-0 clears PCI INTA# and
88b06bc2 6606 * chip-internal interrupt pending events.
fac9b83e 6607 * Writing non-zero to intr-mbox-0 additionally tells the
88b06bc2
MC
6608 * NIC to stop sending us irqs, engaging "in-intr-handler"
6609 * event coalescing.
6610 */
5b39de91 6611 tw32_mailbox(tnapi->int_mbox, 0x00000001);
61487480 6612 if (likely(!tg3_irq_sync(tp)))
09943a18 6613 napi_schedule(&tnapi->napi);
61487480 6614
88b06bc2
MC
6615 return IRQ_RETVAL(1);
6616}
6617
7d12e780 6618static irqreturn_t tg3_interrupt(int irq, void *dev_id)
1da177e4 6619{
09943a18
MC
6620 struct tg3_napi *tnapi = dev_id;
6621 struct tg3 *tp = tnapi->tp;
898a56f8 6622 struct tg3_hw_status *sblk = tnapi->hw_status;
1da177e4
LT
6623 unsigned int handled = 1;
6624
1da177e4
LT
6625 /* In INTx mode, it is possible for the interrupt to arrive at
6626 * the CPU before the status block posted prior to the interrupt.
6627 * Reading the PCI State register will confirm whether the
6628 * interrupt is ours and will flush the status block.
6629 */
d18edcb2 6630 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
63c3a66f 6631 if (tg3_flag(tp, CHIP_RESETTING) ||
d18edcb2
MC
6632 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6633 handled = 0;
f47c11ee 6634 goto out;
fac9b83e 6635 }
d18edcb2
MC
6636 }
6637
6638 /*
6639 * Writing any value to intr-mbox-0 clears PCI INTA# and
6640 * chip-internal interrupt pending events.
 6641 * Writing non-zero to intr-mbox-0 additionally tells the
6642 * NIC to stop sending us irqs, engaging "in-intr-handler"
6643 * event coalescing.
c04cb347
MC
6644 *
6645 * Flush the mailbox to de-assert the IRQ immediately to prevent
6646 * spurious interrupts. The flush impacts performance but
6647 * excessive spurious interrupts can be worse in some cases.
d18edcb2 6648 */
c04cb347 6649 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
d18edcb2
MC
6650 if (tg3_irq_sync(tp))
6651 goto out;
6652 sblk->status &= ~SD_STATUS_UPDATED;
17375d25 6653 if (likely(tg3_has_work(tnapi))) {
72334482 6654 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
09943a18 6655 napi_schedule(&tnapi->napi);
d18edcb2
MC
6656 } else {
6657 /* No work, shared interrupt perhaps? re-enable
6658 * interrupts, and flush that PCI write
6659 */
6660 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6661 0x00000000);
fac9b83e 6662 }
f47c11ee 6663out:
fac9b83e
DM
6664 return IRQ_RETVAL(handled);
6665}
6666
7d12e780 6667static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
fac9b83e 6668{
09943a18
MC
6669 struct tg3_napi *tnapi = dev_id;
6670 struct tg3 *tp = tnapi->tp;
898a56f8 6671 struct tg3_hw_status *sblk = tnapi->hw_status;
fac9b83e
DM
6672 unsigned int handled = 1;
6673
fac9b83e
DM
6674 /* In INTx mode, it is possible for the interrupt to arrive at
6675 * the CPU before the status block posted prior to the interrupt.
6676 * Reading the PCI State register will confirm whether the
6677 * interrupt is ours and will flush the status block.
6678 */
898a56f8 6679 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
63c3a66f 6680 if (tg3_flag(tp, CHIP_RESETTING) ||
d18edcb2
MC
6681 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6682 handled = 0;
f47c11ee 6683 goto out;
1da177e4 6684 }
d18edcb2
MC
6685 }
6686
6687 /*
6688 * writing any value to intr-mbox-0 clears PCI INTA# and
6689 * chip-internal interrupt pending events.
 6690 * writing non-zero to intr-mbox-0 additionally tells the
6691 * NIC to stop sending us irqs, engaging "in-intr-handler"
6692 * event coalescing.
c04cb347
MC
6693 *
6694 * Flush the mailbox to de-assert the IRQ immediately to prevent
6695 * spurious interrupts. The flush impacts performance but
6696 * excessive spurious interrupts can be worse in some cases.
d18edcb2 6697 */
c04cb347 6698 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
624f8e50
MC
6699
6700 /*
6701 * In a shared interrupt configuration, sometimes other devices'
6702 * interrupts will scream. We record the current status tag here
6703 * so that the above check can report that the screaming interrupts
6704 * are unhandled. Eventually they will be silenced.
6705 */
898a56f8 6706 tnapi->last_irq_tag = sblk->status_tag;
624f8e50 6707
d18edcb2
MC
6708 if (tg3_irq_sync(tp))
6709 goto out;
624f8e50 6710
72334482 6711 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
624f8e50 6712
09943a18 6713 napi_schedule(&tnapi->napi);
624f8e50 6714
f47c11ee 6715out:
1da177e4
LT
6716 return IRQ_RETVAL(handled);
6717}
6718
7938109f 6719/* ISR for interrupt test */
7d12e780 6720static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7938109f 6721{
09943a18
MC
6722 struct tg3_napi *tnapi = dev_id;
6723 struct tg3 *tp = tnapi->tp;
898a56f8 6724 struct tg3_hw_status *sblk = tnapi->hw_status;
7938109f 6725
f9804ddb
MC
6726 if ((sblk->status & SD_STATUS_UPDATED) ||
6727 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
b16250e3 6728 tg3_disable_ints(tp);
7938109f
MC
6729 return IRQ_RETVAL(1);
6730 }
6731 return IRQ_RETVAL(0);
6732}
6733
1da177e4
LT
6734#ifdef CONFIG_NET_POLL_CONTROLLER
6735static void tg3_poll_controller(struct net_device *dev)
6736{
4f125f42 6737 int i;
88b06bc2
MC
6738 struct tg3 *tp = netdev_priv(dev);
6739
4f125f42 6740 for (i = 0; i < tp->irq_cnt; i++)
fe234f0e 6741 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
1da177e4
LT
6742}
6743#endif
6744
1da177e4
LT
6745static void tg3_tx_timeout(struct net_device *dev)
6746{
6747 struct tg3 *tp = netdev_priv(dev);
6748
b0408751 6749 if (netif_msg_tx_err(tp)) {
05dbe005 6750 netdev_err(dev, "transmit timed out, resetting\n");
97bd8e49 6751 tg3_dump_state(tp);
b0408751 6752 }
1da177e4 6753
db219973 6754 tg3_reset_task_schedule(tp);
1da177e4
LT
6755}
6756
c58ec932
MC
6757/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6758static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6759{
6760 u32 base = (u32) mapping & 0xffffffff;
6761
807540ba 6762 return (base > 0xffffdcc0) && (base + len + 8 < base);
c58ec932
MC
6763}
6764
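/* Illustrative sketch (not part of the driver): two made-up mappings
 * run through tg3_4g_overflow_test() above.
 *
 *	base = 0xfffff000, len = 0x2000:
 *		base > 0xffffdcc0, and 0xfffff000 + 0x2000 + 8 wraps to
 *		0x1008 < base, so the buffer straddles a 4GB boundary.
 *	base = 0x10000000, len = 0x2000:
 *		base <= 0xffffdcc0, so no boundary is near; test fails.
 */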
72f2afb8
MC
6765/* Test for DMA addresses > 40-bit */
6766static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6767 int len)
6768{
6769#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
63c3a66f 6770 if (tg3_flag(tp, 40BIT_DMA_BUG))
807540ba 6771 return ((u64) mapping + len) > DMA_BIT_MASK(40);
72f2afb8
MC
6772 return 0;
6773#else
6774 return 0;
6775#endif
6776}
6777
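/* Illustrative sketch (not part of the driver): the 40-bit check above
 * with a made-up mapping. DMA_BIT_MASK(40) == (1ULL << 40) - 1, so:
 *
 *	mapping = 0xfffffff000, len = 0x2000:
 *		mapping + len == 0x10000001000 > DMA_BIT_MASK(40),
 *		so the workaround path must be taken.
 */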
d1a3b737 6778static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
92cd3a17
MC
6779 dma_addr_t mapping, u32 len, u32 flags,
6780 u32 mss, u32 vlan)
2ffcc981 6781{
92cd3a17
MC
6782 txbd->addr_hi = ((u64) mapping >> 32);
6783 txbd->addr_lo = ((u64) mapping & 0xffffffff);
6784 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6785 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
2ffcc981 6786}
1da177e4 6787
84b67b27 6788static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
d1a3b737
MC
6789 dma_addr_t map, u32 len, u32 flags,
6790 u32 mss, u32 vlan)
6791{
6792 struct tg3 *tp = tnapi->tp;
6793 bool hwbug = false;
6794
6795 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
3db1cd5c 6796 hwbug = true;
d1a3b737
MC
6797
6798 if (tg3_4g_overflow_test(map, len))
3db1cd5c 6799 hwbug = true;
d1a3b737
MC
6800
6801 if (tg3_40bit_overflow_test(tp, map, len))
3db1cd5c 6802 hwbug = true;
d1a3b737 6803
a4cb428d 6804 if (tp->dma_limit) {
b9e45482 6805 u32 prvidx = *entry;
e31aa987 6806 u32 tmp_flag = flags & ~TXD_FLAG_END;
a4cb428d
MC
6807 while (len > tp->dma_limit && *budget) {
6808 u32 frag_len = tp->dma_limit;
6809 len -= tp->dma_limit;
e31aa987 6810
b9e45482
MC
6811 /* Avoid the 8byte DMA problem */
6812 if (len <= 8) {
a4cb428d
MC
6813 len += tp->dma_limit / 2;
6814 frag_len = tp->dma_limit / 2;
e31aa987
MC
6815 }
6816
b9e45482
MC
6817 tnapi->tx_buffers[*entry].fragmented = true;
6818
6819 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6820 frag_len, tmp_flag, mss, vlan);
6821 *budget -= 1;
6822 prvidx = *entry;
6823 *entry = NEXT_TX(*entry);
6824
e31aa987
MC
6825 map += frag_len;
6826 }
6827
6828 if (len) {
6829 if (*budget) {
6830 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6831 len, flags, mss, vlan);
b9e45482 6832 *budget -= 1;
e31aa987
MC
6833 *entry = NEXT_TX(*entry);
6834 } else {
3db1cd5c 6835 hwbug = true;
b9e45482 6836 tnapi->tx_buffers[prvidx].fragmented = false;
e31aa987
MC
6837 }
6838 }
6839 } else {
84b67b27
MC
6840 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6841 len, flags, mss, vlan);
e31aa987
MC
6842 *entry = NEXT_TX(*entry);
6843 }
d1a3b737
MC
6844
6845 return hwbug;
6846}
6847
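/* Illustrative sketch (not part of the driver): how the dma_limit loop
 * in tg3_tx_frag_set() above avoids leaving a short (<= 8 byte) tail.
 * Assume dma_limit = 4096 and a 4100-byte fragment:
 *
 *	pass 1: frag_len = 4096 would leave len = 4 (<= 8), so the split
 *		is rebalanced to frag_len = 2048, leaving len = 2052
 *	pass 2: len = 2052 <= dma_limit, emit the final 2052-byte BD
 *
 * 4100 == 2048 + 2052, and neither BD triggers the 8-byte DMA bug.
 */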
0d681b27 6848static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
432aa7ed
MC
6849{
6850 int i;
0d681b27 6851 struct sk_buff *skb;
df8944cf 6852 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
432aa7ed 6853
0d681b27
MC
6854 skb = txb->skb;
6855 txb->skb = NULL;
6856
432aa7ed
MC
6857 pci_unmap_single(tnapi->tp->pdev,
6858 dma_unmap_addr(txb, mapping),
6859 skb_headlen(skb),
6860 PCI_DMA_TODEVICE);
e01ee14d
MC
6861
6862 while (txb->fragmented) {
6863 txb->fragmented = false;
6864 entry = NEXT_TX(entry);
6865 txb = &tnapi->tx_buffers[entry];
6866 }
6867
ba1142e4 6868 for (i = 0; i <= last; i++) {
9e903e08 6869 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
432aa7ed
MC
6870
6871 entry = NEXT_TX(entry);
6872 txb = &tnapi->tx_buffers[entry];
6873
6874 pci_unmap_page(tnapi->tp->pdev,
6875 dma_unmap_addr(txb, mapping),
9e903e08 6876 skb_frag_size(frag), PCI_DMA_TODEVICE);
e01ee14d
MC
6877
6878 while (txb->fragmented) {
6879 txb->fragmented = false;
6880 entry = NEXT_TX(entry);
6881 txb = &tnapi->tx_buffers[entry];
6882 }
432aa7ed
MC
6883 }
6884}
6885
72f2afb8 6886/* Workaround 4GB and 40-bit hardware DMA bugs. */
24f4efd4 6887static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
f7ff1987 6888 struct sk_buff **pskb,
84b67b27 6889 u32 *entry, u32 *budget,
92cd3a17 6890 u32 base_flags, u32 mss, u32 vlan)
1da177e4 6891{
24f4efd4 6892 struct tg3 *tp = tnapi->tp;
f7ff1987 6893 struct sk_buff *new_skb, *skb = *pskb;
c58ec932 6894 dma_addr_t new_addr = 0;
432aa7ed 6895 int ret = 0;
1da177e4 6896
41588ba1
MC
6897 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6898 new_skb = skb_copy(skb, GFP_ATOMIC);
6899 else {
6900 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6901
6902 new_skb = skb_copy_expand(skb,
6903 skb_headroom(skb) + more_headroom,
6904 skb_tailroom(skb), GFP_ATOMIC);
6905 }
6906
1da177e4 6907 if (!new_skb) {
c58ec932
MC
6908 ret = -1;
6909 } else {
6910 /* New SKB is guaranteed to be linear. */
f4188d8a
AD
6911 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6912 PCI_DMA_TODEVICE);
6913 /* Make sure the mapping succeeded */
6914 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
f4188d8a 6915 dev_kfree_skb(new_skb);
c58ec932 6916 ret = -1;
c58ec932 6917 } else {
b9e45482
MC
6918 u32 save_entry = *entry;
6919
92cd3a17
MC
6920 base_flags |= TXD_FLAG_END;
6921
84b67b27
MC
6922 tnapi->tx_buffers[*entry].skb = new_skb;
6923 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
432aa7ed
MC
6924 mapping, new_addr);
6925
84b67b27 6926 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
d1a3b737
MC
6927 new_skb->len, base_flags,
6928 mss, vlan)) {
ba1142e4 6929 tg3_tx_skb_unmap(tnapi, save_entry, -1);
d1a3b737
MC
6930 dev_kfree_skb(new_skb);
6931 ret = -1;
6932 }
f4188d8a 6933 }
1da177e4
LT
6934 }
6935
6936 dev_kfree_skb(skb);
f7ff1987 6937 *pskb = new_skb;
c58ec932 6938 return ret;
1da177e4
LT
6939}
6940
2ffcc981 6941static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
52c0fd83
MC
6942
 6943 /* Use GSO to work around a rare TSO bug that may be triggered when the
6944 * TSO header is greater than 80 bytes.
6945 */
6946static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6947{
6948 struct sk_buff *segs, *nskb;
f3f3f27e 6949 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
52c0fd83
MC
6950
6951 /* Estimate the number of fragments in the worst case */
f3f3f27e 6952 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
52c0fd83 6953 netif_stop_queue(tp->dev);
f65aac16
MC
6954
6955 /* netif_tx_stop_queue() must be done before checking
 6956 * the tx index in tg3_tx_avail() below, because in
6957 * tg3_tx(), we update tx index before checking for
6958 * netif_tx_queue_stopped().
6959 */
6960 smp_mb();
f3f3f27e 6961 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7f62ad5d
MC
6962 return NETDEV_TX_BUSY;
6963
6964 netif_wake_queue(tp->dev);
52c0fd83
MC
6965 }
6966
6967 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
801678c5 6968 if (IS_ERR(segs))
52c0fd83
MC
6969 goto tg3_tso_bug_end;
6970
6971 do {
6972 nskb = segs;
6973 segs = segs->next;
6974 nskb->next = NULL;
2ffcc981 6975 tg3_start_xmit(nskb, tp->dev);
52c0fd83
MC
6976 } while (segs);
6977
6978tg3_tso_bug_end:
6979 dev_kfree_skb(skb);
6980
6981 return NETDEV_TX_OK;
6982}
52c0fd83 6983
5a6f3074 6984/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
63c3a66f 6985 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
5a6f3074 6986 */
2ffcc981 6987static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
1da177e4
LT
6988{
6989 struct tg3 *tp = netdev_priv(dev);
92cd3a17 6990 u32 len, entry, base_flags, mss, vlan = 0;
84b67b27 6991 u32 budget;
432aa7ed 6992 int i = -1, would_hit_hwbug;
90079ce8 6993 dma_addr_t mapping;
24f4efd4
MC
6994 struct tg3_napi *tnapi;
6995 struct netdev_queue *txq;
432aa7ed 6996 unsigned int last;
f4188d8a 6997
24f4efd4
MC
6998 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6999 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
63c3a66f 7000 if (tg3_flag(tp, ENABLE_TSS))
24f4efd4 7001 tnapi++;
1da177e4 7002
84b67b27
MC
7003 budget = tg3_tx_avail(tnapi);
7004
00b70504 7005 /* We are running in BH disabled context with netif_tx_lock
bea3348e 7006 * and TX reclaim runs via tp->napi.poll inside of a software
f47c11ee
DM
7007 * interrupt. Furthermore, IRQ processing runs lockless so we have
7008 * no IRQ context deadlocks to worry about either. Rejoice!
1da177e4 7009 */
84b67b27 7010 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
24f4efd4
MC
7011 if (!netif_tx_queue_stopped(txq)) {
7012 netif_tx_stop_queue(txq);
1f064a87
SH
7013
7014 /* This is a hard error, log it. */
5129c3a3
MC
7015 netdev_err(dev,
7016 "BUG! Tx Ring full when queue awake!\n");
1f064a87 7017 }
1da177e4
LT
7018 return NETDEV_TX_BUSY;
7019 }
7020
f3f3f27e 7021 entry = tnapi->tx_prod;
1da177e4 7022 base_flags = 0;
84fa7933 7023 if (skb->ip_summed == CHECKSUM_PARTIAL)
1da177e4 7024 base_flags |= TXD_FLAG_TCPUDP_CSUM;
24f4efd4 7025
be98da6a
MC
7026 mss = skb_shinfo(skb)->gso_size;
7027 if (mss) {
eddc9ec5 7028 struct iphdr *iph;
34195c3d 7029 u32 tcp_opt_len, hdr_len;
1da177e4
LT
7030
7031 if (skb_header_cloned(skb) &&
48855432
ED
7032 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7033 goto drop;
1da177e4 7034
34195c3d 7035 iph = ip_hdr(skb);
ab6a5bb6 7036 tcp_opt_len = tcp_optlen(skb);
1da177e4 7037
a5a11955 7038 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
34195c3d 7039
a5a11955 7040 if (!skb_is_gso_v6(skb)) {
34195c3d
MC
7041 iph->check = 0;
7042 iph->tot_len = htons(mss + hdr_len);
7043 }
7044
52c0fd83 7045 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
63c3a66f 7046 tg3_flag(tp, TSO_BUG))
de6f31eb 7047 return tg3_tso_bug(tp, skb);
52c0fd83 7048
1da177e4
LT
7049 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7050 TXD_FLAG_CPU_POST_DMA);
7051
63c3a66f
JP
7052 if (tg3_flag(tp, HW_TSO_1) ||
7053 tg3_flag(tp, HW_TSO_2) ||
7054 tg3_flag(tp, HW_TSO_3)) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	if (vlan_tx_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = vlan_tx_tag_get(skb);
	}

	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;

	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			    ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		/* Now loop through additional data
		 * fragments, and queue them.
		 */
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);
	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating mailbox */
	wmb();

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	mmiowb();
	return NETDEV_TX_OK;

dma_error:
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}

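/* The loopback helpers below serve two consumers: the ethtool self
 * tests and the NETIF_F_LOOPBACK feature. tg3_mac_loopback() folds
 * transmit frames back internally at the MAC, while tg3_phy_lpbk_set()
 * loops them at the PHY (or sets up external loopback when extlpbk
 * is requested).
 */
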
static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
	if (enable) {
		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
				  MAC_MODE_PORT_MODE_MASK);

		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

		if (!tg3_flag(tp, 5705_PLUS))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else {
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

		if (tg3_flag(tp, 5705_PLUS) ||
		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	tw32(MAC_MODE, tp->mac_mode);
	udelay(40);
}

static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, 0);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}

static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (features & NETIF_F_LOOPBACK) {
		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, true);
		netif_carrier_on(tp->dev);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
	} else {
		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, false);
		/* Force link status check */
		tg3_setup_phy(tp, 1);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
	}
}

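/* tg3_set_loopback() is entered through the netdev features path
 * (tg3_set_features() below), typically when userspace toggles the
 * feature with something like "ethtool -K <dev> loopback on".
 */
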
static netdev_features_t tg3_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
		features &= ~NETIF_F_ALL_TSO;

	return features;
}

static int tg3_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
		tg3_set_loopback(dev, features);

	return 0;
}

static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					 tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						 TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				 tp->rx_pkt_map_sz);

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					 TG3_RX_JMB_MAP_SZ);
	}
}

/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking stack, so no interrupts or new tx packets will
 * end up in the driver. tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings; we only set this
	 * stuff once. This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}

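/* Each rx buffer descriptor above carries an opaque cookie built from
 * the ring type and buffer index (RXD_OPAQUE_RING_* plus
 * i << RXD_OPAQUE_INDEX_SHIFT). The chip echoes the cookie back in the
 * rx return ring entry, letting the completion path recover the buffer
 * index directly instead of searching the posting ring.
 */
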
static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	kfree(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
				  tpr->rx_std, tpr->rx_std_mapping);
		tpr->rx_std = NULL;
	}
	if (tpr->rx_jmb) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
				  tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
	}
}

static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
{
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
				      GFP_KERNEL);
	if (!tpr->rx_std_buffers)
		return -ENOMEM;

	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
	if (!tpr->rx_std)
		goto err_out;

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
			goto err_out;

		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
						 TG3_RX_JMB_RING_BYTES(tp),
						 &tpr->rx_jmb_mapping,
						 GFP_KERNEL);
		if (!tpr->rx_jmb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_rx_prodring_fini(tp, tpr);
	return -ENOMEM;
}

/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking stack, so no interrupts or new tx packets will
 * end up in the driver. tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
			struct sk_buff *skb = tnapi->tx_buffers[i].skb;

			if (!skb)
				continue;

			tg3_tx_skb_unmap(tnapi, i,
					 skb_shinfo(skb)->nr_frags - 1);

			dev_kfree_skb_any(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
	}
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking stack, so no interrupts or new tx packets will
 * end up in the driver. tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}

static void tg3_mem_tx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->tx_ring) {
			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
					  tnapi->tx_ring, tnapi->tx_desc_mapping);
			tnapi->tx_ring = NULL;
		}

		kfree(tnapi->tx_buffers);
		tnapi->tx_buffers = NULL;
	}
}

static int tg3_mem_tx_acquire(struct tg3 *tp)
{
	int i;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* If multivector TSS is enabled, vector 0 does not handle
	 * tx interrupts. Don't allocate any resources for it.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
		tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
					    TG3_TX_RING_SIZE, GFP_KERNEL);
		if (!tnapi->tx_buffers)
			goto err_out;

		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
						    TG3_TX_RING_BYTES,
						    &tnapi->tx_desc_mapping,
						    GFP_KERNEL);
		if (!tnapi->tx_ring)
			goto err_out;
	}

	return 0;

err_out:
	tg3_mem_tx_release(tp);
	return -ENOMEM;
}

static void tg3_mem_rx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tg3_rx_prodring_fini(tp, &tnapi->prodring);

		if (!tnapi->rx_rcb)
			continue;

		dma_free_coherent(&tp->pdev->dev,
				  TG3_RX_RCB_RING_BYTES(tp),
				  tnapi->rx_rcb,
				  tnapi->rx_rcb_mapping);
		tnapi->rx_rcb = NULL;
	}
}

static int tg3_mem_rx_acquire(struct tg3 *tp)
{
	unsigned int i, limit;

	limit = tp->rxq_cnt;

	/* If RSS is enabled, we need a (dummy) producer ring
	 * set on vector zero. This is the true hw prodring.
	 */
	if (tg3_flag(tp, ENABLE_RSS))
		limit++;

	for (i = 0; i < limit; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector RSS is enabled, vector 0
		 * does not handle rx or tx interrupts.
		 * Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
						   TG3_RX_RCB_RING_BYTES(tp),
						   &tnapi->rx_rcb_mapping,
						   GFP_KERNEL);
		if (!tnapi->rx_rcb)
			goto err_out;

		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	}

	return 0;

err_out:
	tg3_mem_rx_release(tp);
	return -ENOMEM;
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	tg3_mem_rx_release(tp);
	tg3_mem_tx_release(tp);

	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down. Can sleep.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
					  sizeof(struct tg3_hw_stats),
					  &tp->stats_mapping,
					  GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
						      TG3_HW_STATUS_SIZE,
						      &tnapi->status_mapping,
						      GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
		sblk = tnapi->hw_status;

		if (tg3_flag(tp, ENABLE_RSS)) {
			u16 *prodptr = NULL;

			/*
			 * When RSS is enabled, the status block format changes
			 * slightly. The "rx_jumbo_consumer", "reserved",
			 * and "rx_mini_consumer" members get mapped to the
			 * other three rx return ring producer indexes.
			 */
			switch (i) {
			case 1:
				prodptr = &sblk->idx[0].rx_producer;
				break;
			case 2:
				prodptr = &sblk->rx_jumbo_consumer;
				break;
			case 3:
				prodptr = &sblk->reserved;
				break;
			case 4:
				prodptr = &sblk->rx_mini_consumer;
				break;
			}
			tnapi->rx_rcb_prod_idx = prodptr;
		} else {
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
		}
	}

	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
		goto err_out;

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}

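/* Allocation and teardown are symmetric by design: every failure path
 * in tg3_alloc_consistent() and the tg3_mem_*_acquire() helpers funnels
 * into tg3_free_consistent(), and each release helper checks its
 * pointers before freeing, so a partially completed allocation can
 * always be unwound safely.
 */
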
#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it
 * clears. tp->lock is held.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}

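/* Worst case, the poll loop above spins for MAX_WAIT_CNT * 100us,
 * i.e. roughly 100ms, before declaring the block stuck.
 */
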
/* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	return err;
}

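/* The shutdown order in tg3_abort_hw() is deliberate: the receive-side
 * blocks are quiesced first, then the send-side and DMA engines, and
 * only then host coalescing, the buffer manager, and the memory
 * arbiter, presumably so no block is stopped while an earlier stage
 * can still hand it work.
 */
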
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}

/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (!tg3_flag(tp, PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {
		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}

/* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things. So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared. The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time? It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes. The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to do this reliably (actually, this is no longer
	 * the case, see above). I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
		u16 val16;

		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int j;
			u32 cfg_val;

			/* Wait for link training to complete. */
			for (j = 0; j < 5000; j++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting. Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);

		/* Clear error status */
		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
					   PCI_EXP_DEVSTA_CED |
					   PCI_EXP_DEVSTA_NFED |
					   PCI_EXP_DEVSTA_FED |
					   PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
	}

	/* Reprobe ASF enable state. */
	tg3_flag_clear(tp, ENABLE_ASF);
	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}
	}

	return 0;
}

static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);

/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	__tg3_set_mac_addr(tp, 0);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (tp->hw_stats) {
		/* Save the stats across chip resets... */
		tg3_get_nstats(tp, &tp->net_stats_prev);
		tg3_get_estats(tp, &tp->estats_prev);

		/* And make sure the next sample is new data */
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
	}

	if (err)
		return err;

	return 0;
}

static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err = 0, skip_mac_1 = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (!netif_running(dev))
		return 0;

	if (tg3_flag(tp, ENABLE_ASF)) {
		u32 addr0_high, addr0_low, addr1_high, addr1_low;

		addr0_high = tr32(MAC_ADDR_0_HIGH);
		addr0_low = tr32(MAC_ADDR_0_LOW);
		addr1_high = tr32(MAC_ADDR_1_HIGH);
		addr1_low = tr32(MAC_ADDR_1_LOW);

		/* Skip MAC addr 1 if ASF is using it. */
		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
		    !(addr1_high == 0 && addr1_low == 0))
			skip_mac_1 = 1;
	}
	spin_lock_bh(&tp->lock);
	__tg3_set_mac_addr(tp, skip_mac_1);
	spin_unlock_bh(&tp->lock);

	return err;
}

/* tp->lock is held. */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		      maxlen_flags);

	if (!tg3_flag(tp, 5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}

static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;

	if (!tg3_flag(tp, ENABLE_TSS)) {
		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_TXCOL_TICKS, 0);
		tw32(HOSTCC_TXMAX_FRAMES, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);

		for (; i < tp->txq_cnt; i++) {
			u32 reg;

			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
			tw32(reg, ec->tx_coalesce_usecs);
			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames);
			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames_irq);
		}
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}

static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;
	u32 limit = tp->rxq_cnt;

	if (!tg3_flag(tp, ENABLE_RSS)) {
		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
		limit--;
	} else {
		tw32(HOSTCC_RXCOL_TICKS, 0);
		tw32(HOSTCC_RXMAX_FRAMES, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
	}

	for (; i < limit; i++) {
		u32 reg;

		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->rx_coalesce_usecs);
		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames);
		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames_irq);
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}

static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	tg3_coal_tx_init(tp, ec);
	tg3_coal_rx_init(tp, ec);

	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 val = ec->stats_block_coalesce_usecs;

		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);

		if (!tp->link_up)
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}
}

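/* The HOSTCC writes above are what an "ethtool -C" request ultimately
 * programs: the ethtool_coalesce fields map directly onto the
 * HOSTCC_{RX,TX}COL_TICKS, *MAX_FRAMES and *COAL_MAXF_INT registers,
 * with the per-vector copies spaced 0x18 bytes apart.
 */
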
/* tp->lock is held. */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk, txrcb, rxrcb, limit;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* Disable all transmit rings but the first. */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	txrcb = NIC_SRAM_SEND_RCB;
	rxrcb = NIC_SRAM_RCV_RET_RCB;

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	if (tnapi->tx_ring) {
		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE <<
				BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
		txrcb += TG3_BDINFO_SIZE;
	}

	if (tnapi->rx_rcb) {
		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
		rxrcb += TG3_BDINFO_SIZE;
	}

	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		if (tnapi->tx_ring) {
			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
				       (TG3_TX_RING_SIZE <<
					BDINFO_FLAGS_MAXLEN_SHIFT),
				       NIC_SRAM_TX_BUFFER_DESC);
			txrcb += TG3_BDINFO_SIZE;
		}

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       ((tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT), 0);

		stblk += 8;
		rxrcb += TG3_BDINFO_SIZE;
	}
}

static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    tg3_flag(tp, 57765_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}

static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg;
	u32 tmp;
	int j, k;

	reg = 0xffffffff;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;

			reg >>= 1;

			if (tmp)
				reg ^= 0xedb88320;
		}
	}

	return ~reg;
}

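/* calc_crc() open-codes the bit-reflected CRC-32 used by Ethernet
 * (polynomial 0xedb88320, initial value all-ones) with a final
 * complement, so it should equal ~ether_crc_le(len, buf). The
 * multicast hashing below undoes the complement via "~crc" before
 * taking the low 7 bits.
 */
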
static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
	/* accept or reject all multicast frames */
	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}

static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}

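/* Worked example of the hash mapping above: if "~crc & 0x7f" yields
 * 0x47, then regidx = (0x47 & 0x60) >> 5 = 2 and bit = 0x47 & 0x1f = 7,
 * so that address sets bit 7 of MAC_HASH_REG_2.
 */
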
static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
{
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
}

static void tg3_rss_check_indir_tbl(struct tg3 *tp)
{
	int i;

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return;

	if (tp->rxq_cnt == 1) {
		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
		return;
	}

	/* Validate table against current IRQ count */
	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
			break;
	}

	if (i != TG3_RSS_INDIR_TBL_SIZE)
		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
}

static void tg3_rss_write_indir_tbl(struct tg3 *tp)
{
	int i = 0;
	u32 reg = MAC_RSS_INDIR_TBL_0;

	while (i < TG3_RSS_INDIR_TBL_SIZE) {
		u32 val = tp->rss_ind_tbl[i];
		i++;
		for (; i % 8; i++) {
			val <<= 4;
			val |= tp->rss_ind_tbl[i];
		}
		tw32(reg, val);
		reg += 4;
	}
}

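/* The indirection table is packed eight 4-bit entries per 32-bit
 * register, first entry in the most significant nibble, across
 * TG3_RSS_INDIR_TBL_SIZE / 8 consecutive registers starting at
 * MAC_RSS_INDIR_TBL_0.
 */
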
1da177e4 8745/* tp->lock is held. */
8e7a22e3 8746static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
1da177e4
LT
8747{
8748 u32 val, rdmac_mode;
8749 int i, err, limit;
8fea32b9 8750 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
1da177e4
LT
8751
8752 tg3_disable_ints(tp);
8753
8754 tg3_stop_fw(tp);
8755
8756 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8757
63c3a66f 8758 if (tg3_flag(tp, INIT_COMPLETE))
e6de8ad1 8759 tg3_abort_hw(tp, 1);
1da177e4 8760
699c0193
MC
8761 /* Enable MAC control of LPI */
8762 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8763 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8764 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8765 TG3_CPMU_EEE_LNKIDL_UART_IDL);
8766
8767 tw32_f(TG3_CPMU_EEE_CTRL,
8768 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8769
a386b901
MC
8770 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8771 TG3_CPMU_EEEMD_LPI_IN_TX |
8772 TG3_CPMU_EEEMD_LPI_IN_RX |
8773 TG3_CPMU_EEEMD_EEE_ENABLE;
8774
8775 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8776 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8777
63c3a66f 8778 if (tg3_flag(tp, ENABLE_APE))
a386b901
MC
8779 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8780
8781 tw32_f(TG3_CPMU_EEE_MODE, val);
8782
8783 tw32_f(TG3_CPMU_EEE_DBTMR1,
8784 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8785 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8786
8787 tw32_f(TG3_CPMU_EEE_DBTMR2,
d7f2ab20 8788 TG3_CPMU_DBTMR2_APE_TX_2047US |
a386b901 8789 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
699c0193
MC
8790 }
8791
603f1173 8792 if (reset_phy)
d4d2c558
MC
8793 tg3_phy_reset(tp);
8794
1da177e4
LT
8795 err = tg3_chip_reset(tp);
8796 if (err)
8797 return err;
8798
8799 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8800
bcb37f6c 8801 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
d30cdd28
MC
8802 val = tr32(TG3_CPMU_CTRL);
8803 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8804 tw32(TG3_CPMU_CTRL, val);
9acb961e
MC
8805
8806 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8807 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8808 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8809 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8810
8811 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8812 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8813 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8814 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8815
8816 val = tr32(TG3_CPMU_HST_ACC);
8817 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8818 val |= CPMU_HST_ACC_MACCLK_6_25;
8819 tw32(TG3_CPMU_HST_ACC, val);
d30cdd28
MC
8820 }
8821
33466d93
MC
8822 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8823 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8824 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8825 PCIE_PWR_MGMT_L1_THRESH_4MS;
8826 tw32(PCIE_PWR_MGMT_THRESH, val);
521e6b90
MC
8827
8828 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8829 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8830
		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);

		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	if (tg3_flag(tp, L1PLLPD_EN)) {
		u32 grc_mode = tr32(GRC_MODE);

		/* Access the lower 1K of PL PCIE block registers. */
		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);

		tw32(GRC_MODE, grc_mode);
	}

	if (tg3_flag(tp, 57765_CLASS)) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of PL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_PL_LO_PHYCTL5);
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);

			tw32(GRC_MODE, grc_mode);
		}

		if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of DL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_DL_LO_FTSMAX);
			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);

			tw32(GRC_MODE, grc_mode);
		}

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
	}

	/* This works around an issue with Athlon chipsets on
	 * B3 tigon3 silicon.  This bit has no effect on any
	 * other revision.  But do not set this on PCI Express
	 * chips and don't even touch the clocks if the CPMU is present.
	 */
	if (!tg3_flag(tp, CPMU_PRESENT)) {
		if (!tg3_flag(tp, PCI_EXPRESS))
			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE)) {
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_RETRY_SAME_DMA;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
		/* Enable some hw fixes. */
		val = tr32(TG3PCI_MSI_DATA);
		val |= (1 << 26) | (1 << 28) | (1 << 29);
		tw32(TG3PCI_MSI_DATA, val);
	}

	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to setup the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
	err = tg3_init_rings(tp);
	if (err)
		return err;

	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3PCI_DMA_RW_CTRL) &
		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
		if (!tg3_flag(tp, 57765_CLASS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
			val |= DMA_RWCTRL_TAGGED_STAT_WA;
		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
		/* This value is determined during the probe time DMA
		 * engine test, tg3_test_dma.
		 */
		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
			  GRC_MODE_4X_NIC_SEND_RINGS |
			  GRC_MODE_NO_TX_PHDR_CSUM |
			  GRC_MODE_NO_RX_PHDR_CSUM);
	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;

	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processors, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;

	tw32(GRC_MODE,
	     tp->grc_mode |
	     (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));

	/* Set up the timer prescaler register.  Clock is always 66 MHz. */
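	/* With a 66 MHz core clock, a prescaler value of 65 presumably
	 * divides by (65 + 1) to produce the 1 MHz tick the timer logic
	 * expects; this is inferred from the constants, not from docs.
	 */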
	val = tr32(GRC_MISC_CFG);
	val &= ~0xff;
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
	tw32(GRC_MISC_CFG, val);

	/* Initialize MBUF/DESC pool. */
	if (tg3_flag(tp, 5750_PLUS)) {
		/* Do nothing. */
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
		else
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	} else if (tg3_flag(tp, TSO_CAPABLE)) {
		int fw_len;

		fw_len = tp->fw_len;
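		/* Round the firmware length up to the next 128-byte
		 * boundary; the mbuf pool is carved out of the SRAM
		 * that follows the firmware image.
		 */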
		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
		tw32(BUFMGR_MB_POOL_ADDR,
		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
		tw32(BUFMGR_MB_POOL_SIZE,
		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
	}

	if (tp->dev->mtu <= ETH_DATA_LEN) {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water);
	} else {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water_jumbo);
	}
	tw32(BUFMGR_DMA_LOW_WATER,
	     tp->bufmgr_config.dma_low_water);
	tw32(BUFMGR_DMA_HIGH_WATER,
	     tp->bufmgr_config.dma_high_water);

	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
	tw32(BUFMGR_MODE, val);
	for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
			break;
		udelay(10);
	}
	if (i >= 2000) {
		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
		return -ENODEV;
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);

	tg3_setup_rxbd_thresholds(tp);

	/* Initialize TG3_BDINFO's at:
	 *  RCVDBDI_STD_BD:	standard eth size rx ring
	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
	 *
	 * like so:
	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
	 *				ring attribute flags
	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
	 *
	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
	 *
	 * The size of each ring is fixed in the firmware, but the location is
	 * configurable.
	 */
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tpr->rx_std_mapping >> 32));
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tpr->rx_std_mapping & 0xffffffff));
	if (!tg3_flag(tp, 5717_PLUS))
		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
		     NIC_SRAM_RX_BUFFER_DESC);

	/* Disable the mini ring */
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
		     BDINFO_FLAGS_DISABLED);

	/* Program the jumbo buffer descriptor ring control
	 * blocks on those devices that have them.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {

		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
			     ((u64) tpr->rx_jmb_mapping >> 32));
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
			val = TG3_RX_JMB_RING_SIZE(tp) <<
			      BDINFO_FLAGS_MAXLEN_SHIFT;
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     val | BDINFO_FLAGS_USE_EXT_RECV);
			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
			    tg3_flag(tp, 57765_CLASS))
				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
		} else {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     BDINFO_FLAGS_DISABLED);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			val = TG3_RX_STD_RING_SIZE(tp);
			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
			val |= (TG3_RX_STD_DMA_SZ << 2);
		} else
			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
	} else
		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;

	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);

	tpr->rx_std_prod_idx = tp->rx_pending;
	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);

	tpr->rx_jmb_prod_idx =
		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);

	tg3_rings_reset(tp);

	/* Initialize MAC address and backoff seed. */
	__tg3_set_mac_addr(tp, 0);

	/* MTU + ethernet header + FCS + optional VLAN tag */
	tw32(MAC_RX_MTU_SIZE,
	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

	/* The slot time is changed by tg3_setup_phy if we
	 * run at gigabit with half duplex.
	 */
	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT) |
	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	tw32(MAC_TX_LENGTHS, val);

	/* Receive rules. */
	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
	tw32(RCVLPC_CONFIG, 0x0181);

	/* Calculate the RDMAC_MODE setting early; we need it to determine
	 * the RCVLPC_STATE_ENABLE mask.
	 */
	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
		      RDMAC_MODE_LNGREAD_ENAB);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
		}
	}

	if (tg3_flag(tp, PCI_EXPRESS))
		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;

	if (tg3_flag(tp, 57765_PLUS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3_RDMA_RSRVCTRL_REG);
		if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
		}
		tw32(TG3_RDMA_RSRVCTRL_REG,
		     val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
	}

	/* Receive/send statistics. */
	if (tg3_flag(tp, 5750_PLUS)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_DACK_FIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
		   tg3_flag(tp, TSO_CAPABLE)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else {
		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
	}
	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
	tw32(SNDDATAI_STATSENAB, 0xffffff);
	tw32(SNDDATAI_STATSCTRL,
	     (SNDDATAI_SCTRL_ENABLE |
	      SNDDATAI_SCTRL_FASTUPD));

	/* Setup host coalescing engine. */
	tw32(HOSTCC_MODE, 0);
	for (i = 0; i < 2000; i++) {
		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
			break;
		udelay(10);
	}

	__tg3_set_coalesce(tp, &tp->coal);

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Status/statistics block address.  See tg3_timer,
		 * the tg3_periodic_fetch_stats call there, and
		 * tg3_get_stats to see how this works for 5705/5750 chips.
		 */
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
		     ((u64) tp->stats_mapping >> 32));
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
		     ((u64) tp->stats_mapping & 0xffffffff));
		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);

		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);

		/* Clear statistics and status block memory areas */
		for (i = NIC_SRAM_STATS_BLK;
		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
		     i += sizeof(u32)) {
			tg3_write_mem(tp, i, 0);
			udelay(40);
		}
	}

	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);

	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);

	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		/* reset to prevent losing 1st rx packet intermittently */
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}

	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
			MAC_MODE_FHDE_ENABLE;
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	if (!tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
	udelay(40);

	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
	 * If TG3_FLAG_IS_NIC is zero, we should read the
	 * register to preserve the GPIO settings for LOMs.  The GPIOs,
	 * whether used as inputs or outputs, are set by boot code after
	 * reset.
	 */
	if (!tg3_flag(tp, IS_NIC)) {
		u32 gpio_mask;

		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
				     GRC_LCLCTRL_GPIO_OUTPUT3;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;

		tp->grc_local_ctrl &= ~gpio_mask;
		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;

		/* GPIO1 must be driven high for eeprom write protect */
		if (tg3_flag(tp, EEPROM_WRITE_PROT))
			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
					       GRC_LCLCTRL_GPIO_OUTPUT1);
	}
	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	udelay(100);

	if (tg3_flag(tp, USING_MSIX)) {
		val = tr32(MSGINT_MODE);
		val |= MSGINT_MODE_ENABLE;
		if (tp->irq_cnt > 1)
			val |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	if (!tg3_flag(tp, 5705_PLUS)) {
		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
		udelay(40);
	}

	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
	       WDMAC_MODE_LNGREAD_ENAB);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
		     tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
			/* nothing */
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			val |= WDMAC_MODE_RX_ACCEL;
		}
	}

	/* Enable host coalescing bug fix */
	if (tg3_flag(tp, 5755_PLUS))
		val |= WDMAC_MODE_STATUS_TAG_FIX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		val |= WDMAC_MODE_BURST_ALL_DATA;

	tw32_f(WDMAC_MODE, val);
	udelay(40);

	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
			pcix_cmd |= PCI_X_CMD_READ_2K;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
			pcix_cmd |= PCI_X_CMD_READ_2K;
		}
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	tw32_f(RDMAC_MODE, rdmac_mode);
	udelay(40);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
				break;
		}
		if (i < TG3_NUM_RDMA_CHANNELS) {
			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
			val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
			tg3_flag_set(tp, 5719_RDMA_BUG);
		}
	}

	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		tw32(SNDDATAC_MODE,
		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
	else
		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);

	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		val |= RCVDBDI_MODE_LRG_RING_SZ;
	tw32(RCVDBDI_MODE, val);
	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
	if (tg3_flag(tp, ENABLE_TSS))
		val |= SNDBDI_MODE_MULTI_TXQ_EN;
	tw32(SNDBDI_MODE, val);
	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
		err = tg3_load_5701_a0_firmware_fix(tp);
		if (err)
			return err;
	}

	if (tg3_flag(tp, TSO_CAPABLE)) {
		err = tg3_load_tso_firmware(tp);
		if (err)
			return err;
	}

	tp->tx_mode = TX_MODE_ENABLE;

	if (tg3_flag(tp, 5755_PLUS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
		tp->tx_mode &= ~val;
		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
	}

	tw32_f(MAC_TX_MODE, tp->tx_mode);
	udelay(100);

	if (tg3_flag(tp, ENABLE_RSS)) {
		tg3_rss_write_indir_tbl(tp);

		/* Setup the "secret" hash key. */
		tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
		tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
		tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
		tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
		tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
		tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
		tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
		tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
		tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
		tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
	}
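	/* The ten MAC_RSS_HASH_KEY_* writes above load the 40-byte key used
	 * by the RSS flow hash.  A fixed key is used here, presumably so
	 * that the flow-to-queue mapping is reproducible across resets.
	 */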

	tp->rx_mode = RX_MODE_ENABLE;
	if (tg3_flag(tp, 5755_PLUS))
		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;

	if (tg3_flag(tp, ENABLE_RSS))
		tp->rx_mode |= RX_MODE_RSS_ENABLE |
			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
			       RX_MODE_RSS_IPV6_HASH_EN |
			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
			       RX_MODE_RSS_IPV4_HASH_EN |
			       RX_MODE_RSS_TCP_IPV4_HASH_EN;

	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	tw32(MAC_LED_CTRL, tp->led_ctrl);

	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
			/* Set drive transmission level to 1.2V, */
			/* only if the signal pre-emphasis bit is not set. */
			val = tr32(MAC_SERDES_CFG);
			val &= 0xfffff000;
			val |= 0x880;
			tw32(MAC_SERDES_CFG, val);
		}
		if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
			tw32(MAC_SERDES_CFG, 0x616000);
	}

	/* Prevent chip from dropping frames when flow control
	 * is enabled.
	 */
	if (tg3_flag(tp, 57765_CLASS))
		val = 1;
	else
		val = 2;
	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		/* Use hardware link auto-negotiation */
		tg3_flag_set(tp, HW_AUTONEG);
	}

	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		u32 tmp;

		tmp = tr32(SERDES_RX_CTRL);
		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	}

	if (!tg3_flag(tp, USE_PHYLIB)) {
		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;

		err = tg3_setup_phy(tp, 0);
		if (err)
			return err;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			u32 tmp;

			/* Clear CRC stats. */
			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
				tg3_writephy(tp, MII_TG3_TEST1,
					     tmp | MII_TG3_TEST1_CRC_EN);
				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
			}
		}
	}

	__tg3_set_rx_mode(tp->dev);

	/* Initialize receive rules. */
	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);

	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
		limit = 8;
	else
		limit = 16;
	if (tg3_flag(tp, ENABLE_ASF))
		limit -= 4;
	switch (limit) {
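	/* Each case deliberately falls through, clearing every rule from
	 * (limit - 1) down to 4; rules 0-3 are left as programmed above.
	 */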
	case 16:
		tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
	case 15:
		tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
	case 14:
		tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
	case 13:
		tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
	case 12:
		tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
	case 11:
		tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
	case 10:
		tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
	case 9:
		tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
	case 8:
		tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
	case 7:
		tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
	case 6:
		tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
	case 5:
		tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
	case 4:
		/* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
	case 3:
		/* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
	case 2:
	case 1:

	default:
		break;
	}

	if (tg3_flag(tp, ENABLE_APE))
		/* Write our heartbeat update interval to APE. */
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
				APE_HOST_HEARTBEAT_INT_DISABLE);

	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);

	return 0;
}

/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
	tg3_switch_clocks(tp);

	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}

static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
{
	int i;

	for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
		u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;

		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
		off += len;

		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
			memset(ocir, 0, TG3_OCIR_LEN);
	}
}

/* sysfs attributes for hwmon */
static ssize_t tg3_show_temp(struct device *dev,
			     struct device_attribute *devattr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	u32 temperature;

	spin_lock_bh(&tp->lock);
	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
				sizeof(temperature));
	spin_unlock_bh(&tp->lock);
	return sprintf(buf, "%u\n", temperature);
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_SENSOR_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_CAUTION_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_MAX_OFFSET);

static struct attribute *tg3_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	NULL
};

static const struct attribute_group tg3_group = {
	.attrs = tg3_attributes,
};

static void tg3_hwmon_close(struct tg3 *tp)
{
	if (tp->hwmon_dev) {
		hwmon_device_unregister(tp->hwmon_dev);
		tp->hwmon_dev = NULL;
		sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
	}
}

static void tg3_hwmon_open(struct tg3 *tp)
{
	int i, err;
	u32 size = 0;
	struct pci_dev *pdev = tp->pdev;
	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];

	tg3_sd_scan_scratchpad(tp, ocirs);

	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
		if (!ocirs[i].src_data_length)
			continue;

		size += ocirs[i].src_hdr_length;
		size += ocirs[i].src_data_length;
	}

	if (!size)
		return;

	/* Register hwmon sysfs hooks */
	err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
	if (err) {
		dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
		return;
	}

	tp->hwmon_dev = hwmon_device_register(&pdev->dev);
	if (IS_ERR(tp->hwmon_dev)) {
		tp->hwmon_dev = NULL;
		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
		sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
	}
}

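/* Accumulate a 32-bit hardware counter into a split 64-bit software
 * counter.  If the low word ends up smaller than the value just added,
 * the unsigned addition wrapped, so carry one into the high word.
 */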
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)

static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!tp->link_up)
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
	if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
		u32 val;

		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
		tg3_flag_clear(tp, 5719_RDMA_BUG);
	}

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}

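/* Some chips can occasionally drop an MSI under load.  The check below
 * treats a NAPI context as stalled when it reports pending work but
 * neither consumer index has moved since the previous timer tick; after
 * one tick of grace (chk_msi_cnt) the handler is invoked by hand.
 */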
static void tg3_chk_missed_msi(struct tg3 *tp)
{
	u32 i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_has_work(tnapi)) {
			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
			    tnapi->last_tx_cons == tnapi->tx_cons) {
				if (tnapi->chk_msi_cnt < 1) {
					tnapi->chk_msi_cnt++;
					return;
				}
				tg3_msi(0, tnapi);
			}
		}
		tnapi->chk_msi_cnt = 0;
		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
		tnapi->last_tx_cons = tnapi->tx_cons;
	}
}

static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
		goto restart_timer;

	spin_lock(&tp->lock);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because, when using non-tagged
		 * IRQ status, the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (tp->link_up &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!tp->link_up &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					       (tp->mac_mode &
						~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * the same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}

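/* The timer fires either once per second or ten times per second (on
 * chips that need the missed-MSI or non-tagged-status handling above);
 * timer_multiplier counts those ticks back down so the once-per-second
 * work in tg3_timer runs at the same rate either way.
 */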
229b1ad1 9941static void tg3_timer_init(struct tg3 *tp)
21f7638e
MC
9942{
9943 if (tg3_flag(tp, TAGGED_STATUS) &&
9944 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9945 !tg3_flag(tp, 57765_CLASS))
9946 tp->timer_offset = HZ;
9947 else
9948 tp->timer_offset = HZ / 10;
9949
9950 BUG_ON(tp->timer_offset > HZ);
9951
9952 tp->timer_multiplier = (HZ / tp->timer_offset);
9953 tp->asf_multiplier = (HZ / tp->timer_offset) *
9954 TG3_FW_UPDATE_FREQ_SEC;
9955
9956 init_timer(&tp->timer);
9957 tp->timer.data = (unsigned long) tp;
9958 tp->timer.function = tg3_timer;
9959}
9960
9961static void tg3_timer_start(struct tg3 *tp)
9962{
9963 tp->asf_counter = tp->asf_multiplier;
9964 tp->timer_counter = tp->timer_multiplier;
9965
9966 tp->timer.expires = jiffies + tp->timer_offset;
9967 add_timer(&tp->timer);
9968}
9969
9970static void tg3_timer_stop(struct tg3 *tp)
9971{
9972 del_timer_sync(&tp->timer);
9973}
9974
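/* Note: on its error path, tg3_restart_hw drops tp->lock around
 * dev_close() and retakes it afterwards, which is what the
 * __releases/__acquires annotations below tell sparse.
 */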
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		tg3_timer_stop(tp);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}

static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
}

static int tg3_request_irq(struct tg3 *tp, int irq_num)
{
	irq_handler_t fn;
	unsigned long flags;
	char *name;
	struct tg3_napi *tnapi = &tp->napi[irq_num];

	if (tp->irq_cnt == 1)
		name = tp->dev->name;
	else {
		name = &tnapi->irq_lbl[0];
		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
		name[IFNAMSIZ-1] = 0;
	}

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		fn = tg3_msi;
		if (tg3_flag(tp, 1SHOT_MSI))
			fn = tg3_msi_1shot;
		flags = 0;
	} else {
		fn = tg3_interrupt;
		if (tg3_flag(tp, TAGGED_STATUS))
			fn = tg3_interrupt_tagged;
		flags = IRQF_SHARED;
	}

	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
}

static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}

/* Returns 0 if the MSI test succeeds, or if the MSI test fails and INTx
 * mode is successfully restored.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}

static int tg3_request_firmware(struct tg3 *tp)
{
	const __be32 *fw_data;

	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
			   tp->fw_needed);
		return -ENOENT;
	}

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course).
	 */

	tp->fw_len = be32_to_cpu(fw_data[2]);	/* includes bss */
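	/* tp->fw->size minus the three 32-bit header words (version,
	 * start address, length) is the loadable payload size; fw_len,
	 * which includes BSS, must be at least that large.
	 */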
	if (tp->fw_len < (tp->fw->size - 12)) {
		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
			   tp->fw_len, tp->fw_needed);
		release_firmware(tp->fw);
		tp->fw = NULL;
		return -EINVAL;
	}

	/* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
	return 0;
}

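/* One MSI-X vector per queue, plus (in multiqueue mode) one extra vector
 * for link and miscellaneous interrupts.  For example, four RX queues on
 * a chip with enough vectors ends up requesting five, subject to the
 * irq_max cap.
 */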
9102426a 10239static u32 tg3_irq_count(struct tg3 *tp)
679563f4 10240{
9102426a 10241 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
679563f4 10242
9102426a 10243 if (irq_cnt > 1) {
c3b5003b
MC
10244 /* We want as many rx rings enabled as there are cpus.
10245 * In multiqueue MSI-X mode, the first MSI-X vector
10246 * only deals with link interrupts, etc, so we add
10247 * one to the number of vectors we are requesting.
10248 */
9102426a 10249 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
c3b5003b 10250 }
679563f4 10251
9102426a
MC
10252 return irq_cnt;
10253}
10254
10255static bool tg3_enable_msix(struct tg3 *tp)
10256{
10257 int i, rc;
86449944 10258 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
9102426a 10259
0968169c
MC
10260 tp->txq_cnt = tp->txq_req;
10261 tp->rxq_cnt = tp->rxq_req;
10262 if (!tp->rxq_cnt)
10263 tp->rxq_cnt = netif_get_num_default_rss_queues();
9102426a
MC
10264 if (tp->rxq_cnt > tp->rxq_max)
10265 tp->rxq_cnt = tp->rxq_max;
cf6d6ea6
MC
10266
10267 /* Disable multiple TX rings by default. Simple round-robin hardware
10268 * scheduling of the TX rings can cause starvation of rings with
10269 * small packets when other rings have TSO or jumbo packets.
10270 */
10271 if (!tp->txq_req)
10272 tp->txq_cnt = 1;
9102426a
MC
10273
10274 tp->irq_cnt = tg3_irq_count(tp);
10275
679563f4
MC
10276 for (i = 0; i < tp->irq_max; i++) {
10277 msix_ent[i].entry = i;
10278 msix_ent[i].vector = 0;
10279 }
10280
10281 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
2430b031
MC
10282 if (rc < 0) {
10283 return false;
10284 } else if (rc != 0) {
679563f4
MC
10285 if (pci_enable_msix(tp->pdev, msix_ent, rc))
10286 return false;
05dbe005
JP
10287 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
10288 tp->irq_cnt, rc);
679563f4 10289 tp->irq_cnt = rc;
49a359e3 10290 tp->rxq_cnt = max(rc - 1, 1);
9102426a
MC
10291 if (tp->txq_cnt)
10292 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
679563f4
MC
10293 }
10294
10295 for (i = 0; i < tp->irq_max; i++)
10296 tp->napi[i].irq_vec = msix_ent[i].vector;
10297
49a359e3 10298 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
2ddaad39
BH
10299 pci_disable_msix(tp->pdev);
10300 return false;
10301 }
b92b9040 10302
9102426a
MC
10303 if (tp->irq_cnt == 1)
10304 return true;
d78b59f5 10305
9102426a
MC
10306 tg3_flag_set(tp, ENABLE_RSS);
10307
10308 if (tp->txq_cnt > 1)
10309 tg3_flag_set(tp, ENABLE_TSS);
10310
10311 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
2430b031 10312
679563f4
MC
10313 return true;
10314}
10315
07b0173c
MC
10316static void tg3_ints_init(struct tg3 *tp)
10317{
63c3a66f
JP
10318 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
10319 !tg3_flag(tp, TAGGED_STATUS)) {
07b0173c
MC
10320 /* All MSI supporting chips should support tagged
10321 * status. Assert that this is the case.
10322 */
5129c3a3
MC
10323 netdev_warn(tp->dev,
10324 "MSI without TAGGED_STATUS? Not using MSI\n");
679563f4 10325 goto defcfg;
07b0173c 10326 }
4f125f42 10327
63c3a66f
JP
10328 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
10329 tg3_flag_set(tp, USING_MSIX);
10330 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
10331 tg3_flag_set(tp, USING_MSI);
679563f4 10332
63c3a66f 10333 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
679563f4 10334 u32 msi_mode = tr32(MSGINT_MODE);
63c3a66f 10335 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
baf8a94a 10336 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
5b39de91
MC
10337 if (!tg3_flag(tp, 1SHOT_MSI))
10338 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
679563f4
MC
10339 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
10340 }
10341defcfg:
63c3a66f 10342 if (!tg3_flag(tp, USING_MSIX)) {
679563f4
MC
10343 tp->irq_cnt = 1;
10344 tp->napi[0].irq_vec = tp->pdev->irq;
49a359e3
MC
10345 }
10346
10347 if (tp->irq_cnt == 1) {
10348 tp->txq_cnt = 1;
10349 tp->rxq_cnt = 1;
2ddaad39 10350 netif_set_real_num_tx_queues(tp->dev, 1);
85407885 10351 netif_set_real_num_rx_queues(tp->dev, 1);
679563f4 10352 }
07b0173c
MC
10353}
10354
10355static void tg3_ints_fini(struct tg3 *tp)
10356{
63c3a66f 10357 if (tg3_flag(tp, USING_MSIX))
679563f4 10358 pci_disable_msix(tp->pdev);
63c3a66f 10359 else if (tg3_flag(tp, USING_MSI))
679563f4 10360 pci_disable_msi(tp->pdev);
63c3a66f
JP
10361 tg3_flag_clear(tp, USING_MSI);
10362 tg3_flag_clear(tp, USING_MSIX);
10363 tg3_flag_clear(tp, ENABLE_RSS);
10364 tg3_flag_clear(tp, ENABLE_TSS);
07b0173c
MC
10365}
10366
d8f4cd38 10367static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq)
1da177e4 10368{
d8f4cd38 10369 struct net_device *dev = tp->dev;
4f125f42 10370 int i, err;
1da177e4 10371
679563f4
MC
10372 /*
10373 * Setup interrupts first so we know how
10374 * many NAPI resources to allocate
10375 */
10376 tg3_ints_init(tp);
10377
90415477 10378 tg3_rss_check_indir_tbl(tp);
bcebcc46 10379
1da177e4
LT
10380 /* The placement of this call is tied
10381 * to the setup and use of Host TX descriptors.
10382 */
10383 err = tg3_alloc_consistent(tp);
10384 if (err)
679563f4 10385 goto err_out1;
88b06bc2 10386
66cfd1bd
MC
10387 tg3_napi_init(tp);
10388
fed97810 10389 tg3_napi_enable(tp);
1da177e4 10390
4f125f42
MC
10391 for (i = 0; i < tp->irq_cnt; i++) {
10392 struct tg3_napi *tnapi = &tp->napi[i];
10393 err = tg3_request_irq(tp, i);
10394 if (err) {
5bc09186
MC
10395 for (i--; i >= 0; i--) {
10396 tnapi = &tp->napi[i];
4f125f42 10397 free_irq(tnapi->irq_vec, tnapi);
5bc09186
MC
10398 }
10399 goto err_out2;
4f125f42
MC
10400 }
10401 }
1da177e4 10402
f47c11ee 10403 tg3_full_lock(tp, 0);
1da177e4 10404
d8f4cd38 10405 err = tg3_init_hw(tp, reset_phy);
1da177e4 10406 if (err) {
944d980e 10407 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4 10408 tg3_free_rings(tp);
1da177e4
LT
10409 }
10410
f47c11ee 10411 tg3_full_unlock(tp);
1da177e4 10412
07b0173c 10413 if (err)
679563f4 10414 goto err_out3;
1da177e4 10415
d8f4cd38 10416 if (test_irq && tg3_flag(tp, USING_MSI)) {
7938109f 10417 err = tg3_test_msi(tp);
fac9b83e 10418
7938109f 10419 if (err) {
f47c11ee 10420 tg3_full_lock(tp, 0);
944d980e 10421 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7938109f 10422 tg3_free_rings(tp);
f47c11ee 10423 tg3_full_unlock(tp);
7938109f 10424
679563f4 10425 goto err_out2;
7938109f 10426 }
fcfa0a32 10427
63c3a66f 10428 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
f6eb9b1f 10429 u32 val = tr32(PCIE_TRANSACTION_CFG);
fcfa0a32 10430
f6eb9b1f
MC
10431 tw32(PCIE_TRANSACTION_CFG,
10432 val | PCIE_TRANS_CFG_1SHOT_MSI);
fcfa0a32 10433 }
7938109f
MC
10434 }
10435
b02fd9e3
MC
10436 tg3_phy_start(tp);
10437
aed93e0b
MC
10438 tg3_hwmon_open(tp);
10439
f47c11ee 10440 tg3_full_lock(tp, 0);
1da177e4 10441
21f7638e 10442 tg3_timer_start(tp);
63c3a66f 10443 tg3_flag_set(tp, INIT_COMPLETE);
1da177e4
LT
10444 tg3_enable_ints(tp);
10445
f47c11ee 10446 tg3_full_unlock(tp);
1da177e4 10447
fe5f5787 10448 netif_tx_start_all_queues(dev);
1da177e4 10449
06c03c02
MB
10450 /*
10451 * Reset loopback feature if it was turned on while the device was down
10452 * make sure that it's installed properly now.
10453 */
10454 if (dev->features & NETIF_F_LOOPBACK)
10455 tg3_set_loopback(dev, dev->features);
10456
1da177e4 10457 return 0;
07b0173c 10458
679563f4 10459err_out3:
4f125f42
MC
10460 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10461 struct tg3_napi *tnapi = &tp->napi[i];
10462 free_irq(tnapi->irq_vec, tnapi);
10463 }
07b0173c 10464
679563f4 10465err_out2:
fed97810 10466 tg3_napi_disable(tp);
66cfd1bd 10467 tg3_napi_fini(tp);
07b0173c 10468 tg3_free_consistent(tp);
679563f4
MC
10469
10470err_out1:
10471 tg3_ints_fini(tp);
d8f4cd38 10472
07b0173c 10473 return err;
1da177e4
LT
10474}
10475
65138594 10476static void tg3_stop(struct tg3 *tp)
1da177e4 10477{
4f125f42 10478 int i;
1da177e4 10479
db219973 10480 tg3_reset_task_cancel(tp);
bd473da3 10481 tg3_netif_stop(tp);
1da177e4 10482
21f7638e 10483 tg3_timer_stop(tp);
1da177e4 10484
aed93e0b
MC
10485 tg3_hwmon_close(tp);
10486
24bb4fb6
MC
10487 tg3_phy_stop(tp);
10488
f47c11ee 10489 tg3_full_lock(tp, 1);
1da177e4
LT
10490
10491 tg3_disable_ints(tp);
10492
944d980e 10493 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4 10494 tg3_free_rings(tp);
63c3a66f 10495 tg3_flag_clear(tp, INIT_COMPLETE);
1da177e4 10496
f47c11ee 10497 tg3_full_unlock(tp);
1da177e4 10498
4f125f42
MC
10499 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10500 struct tg3_napi *tnapi = &tp->napi[i];
10501 free_irq(tnapi->irq_vec, tnapi);
10502 }
07b0173c
MC
10503
10504 tg3_ints_fini(tp);
1da177e4 10505
66cfd1bd
MC
10506 tg3_napi_fini(tp);
10507
1da177e4 10508 tg3_free_consistent(tp);
65138594
MC
10509}
10510
d8f4cd38
MC
10511static int tg3_open(struct net_device *dev)
10512{
10513 struct tg3 *tp = netdev_priv(dev);
10514 int err;
10515
10516 if (tp->fw_needed) {
10517 err = tg3_request_firmware(tp);
10518 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
10519 if (err)
10520 return err;
10521 } else if (err) {
10522 netdev_warn(tp->dev, "TSO capability disabled\n");
10523 tg3_flag_clear(tp, TSO_CAPABLE);
10524 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
10525 netdev_notice(tp->dev, "TSO capability restored\n");
10526 tg3_flag_set(tp, TSO_CAPABLE);
10527 }
10528 }
10529
f4a46d1f 10530 tg3_carrier_off(tp);
d8f4cd38
MC
10531
10532 err = tg3_power_up(tp);
10533 if (err)
10534 return err;
10535
10536 tg3_full_lock(tp, 0);
10537
10538 tg3_disable_ints(tp);
10539 tg3_flag_clear(tp, INIT_COMPLETE);
10540
10541 tg3_full_unlock(tp);
10542
10543 err = tg3_start(tp, true, true);
10544 if (err) {
10545 tg3_frob_aux_power(tp, false);
10546 pci_set_power_state(tp->pdev, PCI_D3hot);
10547 }
07b0173c 10548 return err;
1da177e4
LT
10549}
10550
1da177e4
LT
10551static int tg3_close(struct net_device *dev)
10552{
10553 struct tg3 *tp = netdev_priv(dev);
10554
65138594 10555 tg3_stop(tp);
1da177e4 10556
92feeabf
MC
10557 /* Clear stats across close / open calls */
10558 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10559 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
1da177e4 10560
c866b7ea 10561 tg3_power_down(tp);
bc1c7567 10562
f4a46d1f 10563 tg3_carrier_off(tp);
bc1c7567 10564
1da177e4
LT
10565 return 0;
10566}
10567
511d2224 10568static inline u64 get_stat64(tg3_stat64_t *val)
816f8b86
SB
10569{
10570 return ((u64)val->high << 32) | ((u64)val->low);
10571}
10572
65ec698d 10573static u64 tg3_calc_crc_errors(struct tg3 *tp)
1da177e4
LT
10574{
10575 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10576
f07e9af3 10577 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
1da177e4
LT
10578 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10579 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1da177e4
LT
10580 u32 val;
10581
569a5df8
MC
10582 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10583 tg3_writephy(tp, MII_TG3_TEST1,
10584 val | MII_TG3_TEST1_CRC_EN);
f08aa1a8 10585 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
1da177e4
LT
10586 } else
10587 val = 0;
1da177e4
LT
10588
10589 tp->phy_crc_errors += val;
10590
10591 return tp->phy_crc_errors;
10592 }
10593
10594 return get_stat64(&hw_stats->rx_fcs_errors);
10595}
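/* On 5700/5701 copper devices the MAC's rx_fcs_errors counter is not
 * used; instead CRC counting is enabled in the PHY via MII_TG3_TEST1
 * and the (apparently read-to-clear) MII_TG3_RXR_COUNTERS register is
 * accumulated into phy_crc_errors in software.
 */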
10596
10597#define ESTAT_ADD(member) \
10598 estats->member = old_estats->member + \
511d2224 10599 get_stat64(&hw_stats->member)
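/* As an expansion sketch, ESTAT_ADD(rx_octets) becomes
 *
 *	estats->rx_octets = old_estats->rx_octets +
 *			    get_stat64(&hw_stats->rx_octets);
 *
 * i.e. each counter is the pre-reset snapshot plus the live hardware
 * count, so totals survive halts and close()/open() cycles.
 */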
1da177e4 10600
65ec698d 10601static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
1da177e4 10602{
1da177e4
LT
10603 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10604 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10605
1da177e4
LT
10606 ESTAT_ADD(rx_octets);
10607 ESTAT_ADD(rx_fragments);
10608 ESTAT_ADD(rx_ucast_packets);
10609 ESTAT_ADD(rx_mcast_packets);
10610 ESTAT_ADD(rx_bcast_packets);
10611 ESTAT_ADD(rx_fcs_errors);
10612 ESTAT_ADD(rx_align_errors);
10613 ESTAT_ADD(rx_xon_pause_rcvd);
10614 ESTAT_ADD(rx_xoff_pause_rcvd);
10615 ESTAT_ADD(rx_mac_ctrl_rcvd);
10616 ESTAT_ADD(rx_xoff_entered);
10617 ESTAT_ADD(rx_frame_too_long_errors);
10618 ESTAT_ADD(rx_jabbers);
10619 ESTAT_ADD(rx_undersize_packets);
10620 ESTAT_ADD(rx_in_length_errors);
10621 ESTAT_ADD(rx_out_length_errors);
10622 ESTAT_ADD(rx_64_or_less_octet_packets);
10623 ESTAT_ADD(rx_65_to_127_octet_packets);
10624 ESTAT_ADD(rx_128_to_255_octet_packets);
10625 ESTAT_ADD(rx_256_to_511_octet_packets);
10626 ESTAT_ADD(rx_512_to_1023_octet_packets);
10627 ESTAT_ADD(rx_1024_to_1522_octet_packets);
10628 ESTAT_ADD(rx_1523_to_2047_octet_packets);
10629 ESTAT_ADD(rx_2048_to_4095_octet_packets);
10630 ESTAT_ADD(rx_4096_to_8191_octet_packets);
10631 ESTAT_ADD(rx_8192_to_9022_octet_packets);
10632
10633 ESTAT_ADD(tx_octets);
10634 ESTAT_ADD(tx_collisions);
10635 ESTAT_ADD(tx_xon_sent);
10636 ESTAT_ADD(tx_xoff_sent);
10637 ESTAT_ADD(tx_flow_control);
10638 ESTAT_ADD(tx_mac_errors);
10639 ESTAT_ADD(tx_single_collisions);
10640 ESTAT_ADD(tx_mult_collisions);
10641 ESTAT_ADD(tx_deferred);
10642 ESTAT_ADD(tx_excessive_collisions);
10643 ESTAT_ADD(tx_late_collisions);
10644 ESTAT_ADD(tx_collide_2times);
10645 ESTAT_ADD(tx_collide_3times);
10646 ESTAT_ADD(tx_collide_4times);
10647 ESTAT_ADD(tx_collide_5times);
10648 ESTAT_ADD(tx_collide_6times);
10649 ESTAT_ADD(tx_collide_7times);
10650 ESTAT_ADD(tx_collide_8times);
10651 ESTAT_ADD(tx_collide_9times);
10652 ESTAT_ADD(tx_collide_10times);
10653 ESTAT_ADD(tx_collide_11times);
10654 ESTAT_ADD(tx_collide_12times);
10655 ESTAT_ADD(tx_collide_13times);
10656 ESTAT_ADD(tx_collide_14times);
10657 ESTAT_ADD(tx_collide_15times);
10658 ESTAT_ADD(tx_ucast_packets);
10659 ESTAT_ADD(tx_mcast_packets);
10660 ESTAT_ADD(tx_bcast_packets);
10661 ESTAT_ADD(tx_carrier_sense_errors);
10662 ESTAT_ADD(tx_discards);
10663 ESTAT_ADD(tx_errors);
10664
10665 ESTAT_ADD(dma_writeq_full);
10666 ESTAT_ADD(dma_write_prioq_full);
10667 ESTAT_ADD(rxbds_empty);
10668 ESTAT_ADD(rx_discards);
10669 ESTAT_ADD(rx_errors);
10670 ESTAT_ADD(rx_threshold_hit);
10671
10672 ESTAT_ADD(dma_readq_full);
10673 ESTAT_ADD(dma_read_prioq_full);
10674 ESTAT_ADD(tx_comp_queue_full);
10675
10676 ESTAT_ADD(ring_set_send_prod_index);
10677 ESTAT_ADD(ring_status_update);
10678 ESTAT_ADD(nic_irqs);
10679 ESTAT_ADD(nic_avoided_irqs);
10680 ESTAT_ADD(nic_tx_threshold_hit);
10681
4452d099 10682 ESTAT_ADD(mbuf_lwm_thresh_hit);
1da177e4
LT
10683}
10684
65ec698d 10685static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
1da177e4 10686{
511d2224 10687 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
1da177e4
LT
10688 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10689
1da177e4
LT
10690 stats->rx_packets = old_stats->rx_packets +
10691 get_stat64(&hw_stats->rx_ucast_packets) +
10692 get_stat64(&hw_stats->rx_mcast_packets) +
10693 get_stat64(&hw_stats->rx_bcast_packets);
6aa20a22 10694
1da177e4
LT
10695 stats->tx_packets = old_stats->tx_packets +
10696 get_stat64(&hw_stats->tx_ucast_packets) +
10697 get_stat64(&hw_stats->tx_mcast_packets) +
10698 get_stat64(&hw_stats->tx_bcast_packets);
10699
10700 stats->rx_bytes = old_stats->rx_bytes +
10701 get_stat64(&hw_stats->rx_octets);
10702 stats->tx_bytes = old_stats->tx_bytes +
10703 get_stat64(&hw_stats->tx_octets);
10704
10705 stats->rx_errors = old_stats->rx_errors +
4f63b877 10706 get_stat64(&hw_stats->rx_errors);
1da177e4
LT
10707 stats->tx_errors = old_stats->tx_errors +
10708 get_stat64(&hw_stats->tx_errors) +
10709 get_stat64(&hw_stats->tx_mac_errors) +
10710 get_stat64(&hw_stats->tx_carrier_sense_errors) +
10711 get_stat64(&hw_stats->tx_discards);
10712
10713 stats->multicast = old_stats->multicast +
10714 get_stat64(&hw_stats->rx_mcast_packets);
10715 stats->collisions = old_stats->collisions +
10716 get_stat64(&hw_stats->tx_collisions);
10717
10718 stats->rx_length_errors = old_stats->rx_length_errors +
10719 get_stat64(&hw_stats->rx_frame_too_long_errors) +
10720 get_stat64(&hw_stats->rx_undersize_packets);
10721
10722 stats->rx_over_errors = old_stats->rx_over_errors +
10723 get_stat64(&hw_stats->rxbds_empty);
10724 stats->rx_frame_errors = old_stats->rx_frame_errors +
10725 get_stat64(&hw_stats->rx_align_errors);
10726 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10727 get_stat64(&hw_stats->tx_discards);
10728 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10729 get_stat64(&hw_stats->tx_carrier_sense_errors);
10730
10731 stats->rx_crc_errors = old_stats->rx_crc_errors +
65ec698d 10732 tg3_calc_crc_errors(tp);
1da177e4 10733
4f63b877
JL
10734 stats->rx_missed_errors = old_stats->rx_missed_errors +
10735 get_stat64(&hw_stats->rx_discards);
10736
b0057c51 10737 stats->rx_dropped = tp->rx_dropped;
48855432 10738 stats->tx_dropped = tp->tx_dropped;
1da177e4
LT
10739}
10740
1da177e4
LT
10741static int tg3_get_regs_len(struct net_device *dev)
10742{
97bd8e49 10743 return TG3_REG_BLK_SIZE;
1da177e4
LT
10744}
10745
10746static void tg3_get_regs(struct net_device *dev,
10747 struct ethtool_regs *regs, void *_p)
10748{
1da177e4 10749 struct tg3 *tp = netdev_priv(dev);
1da177e4
LT
10750
10751 regs->version = 0;
10752
97bd8e49 10753 memset(_p, 0, TG3_REG_BLK_SIZE);
1da177e4 10754
80096068 10755 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
bc1c7567
MC
10756 return;
10757
f47c11ee 10758 tg3_full_lock(tp, 0);
1da177e4 10759
97bd8e49 10760 tg3_dump_legacy_regs(tp, (u32 *)_p);
1da177e4 10761
f47c11ee 10762 tg3_full_unlock(tp);
1da177e4
LT
10763}
10764
10765static int tg3_get_eeprom_len(struct net_device *dev)
10766{
10767 struct tg3 *tp = netdev_priv(dev);
10768
10769 return tp->nvram_size;
10770}
10771
1da177e4
LT
10772static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10773{
10774 struct tg3 *tp = netdev_priv(dev);
10775 int ret;
10776 u8 *pd;
b9fc7dc5 10777 u32 i, offset, len, b_offset, b_count;
a9dc529d 10778 __be32 val;
1da177e4 10779
63c3a66f 10780 if (tg3_flag(tp, NO_NVRAM))
df259d8c
MC
10781 return -EINVAL;
10782
80096068 10783 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
bc1c7567
MC
10784 return -EAGAIN;
10785
1da177e4
LT
10786 offset = eeprom->offset;
10787 len = eeprom->len;
10788 eeprom->len = 0;
10789
10790 eeprom->magic = TG3_EEPROM_MAGIC;
10791
10792 if (offset & 3) {
 10793		/* Adjust to start on the required 4-byte boundary. */
10794 b_offset = offset & 3;
10795 b_count = 4 - b_offset;
10796 if (b_count > len) {
10797 /* i.e. offset=1 len=2 */
10798 b_count = len;
10799 }
a9dc529d 10800 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
1da177e4
LT
10801 if (ret)
10802 return ret;
be98da6a 10803 memcpy(data, ((char *)&val) + b_offset, b_count);
1da177e4
LT
10804 len -= b_count;
10805 offset += b_count;
c6cdf436 10806 eeprom->len += b_count;
1da177e4
LT
10807 }
10808
25985edc 10809	/* Read whole words up to the last 4-byte boundary. */
1da177e4
LT
10810 pd = &data[eeprom->len];
10811 for (i = 0; i < (len - (len & 3)); i += 4) {
a9dc529d 10812 ret = tg3_nvram_read_be32(tp, offset + i, &val);
1da177e4
LT
10813 if (ret) {
10814 eeprom->len += i;
10815 return ret;
10816 }
1da177e4
LT
10817 memcpy(pd + i, &val, 4);
10818 }
10819 eeprom->len += i;
10820
10821 if (len & 3) {
 10822		/* Read the trailing bytes that do not end on a 4-byte boundary. */
10823 pd = &data[eeprom->len];
10824 b_count = len & 3;
10825 b_offset = offset + len - b_count;
a9dc529d 10826 ret = tg3_nvram_read_be32(tp, b_offset, &val);
1da177e4
LT
10827 if (ret)
10828 return ret;
b9fc7dc5 10829 memcpy(pd, &val, b_count);
1da177e4
LT
10830 eeprom->len += b_count;
10831 }
10832 return 0;
10833}
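/* A worked example of the decomposition above, with hypothetical
 * values: offset = 5, len = 10 (bytes 5..14) becomes three aligned
 * reads:
 *
 *	head:   word at 4,  keep bytes 5..7    (b_offset = 1, b_count = 3)
 *	middle: word at 8,  keep bytes 8..11
 *	tail:   word at 12, keep bytes 12..14  (b_count = 3)
 *
 * so NVRAM is only ever accessed on 4-byte boundaries while the
 * caller sees an arbitrary byte range.
 */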
10834
1da177e4
LT
10835static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10836{
10837 struct tg3 *tp = netdev_priv(dev);
10838 int ret;
b9fc7dc5 10839 u32 offset, len, b_offset, odd_len;
1da177e4 10840 u8 *buf;
a9dc529d 10841 __be32 start, end;
1da177e4 10842
80096068 10843 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
bc1c7567
MC
10844 return -EAGAIN;
10845
63c3a66f 10846 if (tg3_flag(tp, NO_NVRAM) ||
df259d8c 10847 eeprom->magic != TG3_EEPROM_MAGIC)
1da177e4
LT
10848 return -EINVAL;
10849
10850 offset = eeprom->offset;
10851 len = eeprom->len;
10852
10853 if ((b_offset = (offset & 3))) {
 10854		/* Adjust to start on the required 4-byte boundary. */
a9dc529d 10855 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
1da177e4
LT
10856 if (ret)
10857 return ret;
1da177e4
LT
10858 len += b_offset;
10859 offset &= ~3;
1c8594b4
MC
10860 if (len < 4)
10861 len = 4;
1da177e4
LT
10862 }
10863
10864 odd_len = 0;
1c8594b4 10865 if (len & 3) {
1da177e4
LT
 10866		/* Adjust to end on the required 4-byte boundary. */
10867 odd_len = 1;
10868 len = (len + 3) & ~3;
a9dc529d 10869 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
1da177e4
LT
10870 if (ret)
10871 return ret;
1da177e4
LT
10872 }
10873
10874 buf = data;
10875 if (b_offset || odd_len) {
10876 buf = kmalloc(len, GFP_KERNEL);
ab0049b4 10877 if (!buf)
1da177e4
LT
10878 return -ENOMEM;
10879 if (b_offset)
10880 memcpy(buf, &start, 4);
10881 if (odd_len)
10882 memcpy(buf+len-4, &end, 4);
10883 memcpy(buf + b_offset, data, eeprom->len);
10884 }
10885
10886 ret = tg3_nvram_write_block(tp, offset, len, buf);
10887
10888 if (buf != data)
10889 kfree(buf);
10890
10891 return ret;
10892}
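/* A worked read-modify-write example, with hypothetical values:
 * offset = 6, len = 3 (bytes 6..8) reads the word at 4 into 'start'
 * and, after rounding len up to 8, the word at 8 into 'end', then
 * builds an 8-byte bounce buffer:
 *
 *	buf[0..3] = start, buf[4..7] = end, buf[2..4] = caller data
 *
 * and writes the aligned block back at offset 4 in a single
 * tg3_nvram_write_block() call.
 */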
10893
10894static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10895{
b02fd9e3
MC
10896 struct tg3 *tp = netdev_priv(dev);
10897
63c3a66f 10898 if (tg3_flag(tp, USE_PHYLIB)) {
3f0e3ad7 10899 struct phy_device *phydev;
f07e9af3 10900 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
b02fd9e3 10901 return -EAGAIN;
3f0e3ad7
MC
10902 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10903 return phy_ethtool_gset(phydev, cmd);
b02fd9e3 10904 }
6aa20a22 10905
1da177e4
LT
10906 cmd->supported = (SUPPORTED_Autoneg);
10907
f07e9af3 10908 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
1da177e4
LT
10909 cmd->supported |= (SUPPORTED_1000baseT_Half |
10910 SUPPORTED_1000baseT_Full);
10911
f07e9af3 10912 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
1da177e4
LT
10913 cmd->supported |= (SUPPORTED_100baseT_Half |
10914 SUPPORTED_100baseT_Full |
10915 SUPPORTED_10baseT_Half |
10916 SUPPORTED_10baseT_Full |
3bebab59 10917 SUPPORTED_TP);
ef348144
KK
10918 cmd->port = PORT_TP;
10919 } else {
1da177e4 10920 cmd->supported |= SUPPORTED_FIBRE;
ef348144
KK
10921 cmd->port = PORT_FIBRE;
10922 }
6aa20a22 10923
1da177e4 10924 cmd->advertising = tp->link_config.advertising;
5bb09778
MC
10925 if (tg3_flag(tp, PAUSE_AUTONEG)) {
10926 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10927 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10928 cmd->advertising |= ADVERTISED_Pause;
10929 } else {
10930 cmd->advertising |= ADVERTISED_Pause |
10931 ADVERTISED_Asym_Pause;
10932 }
10933 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10934 cmd->advertising |= ADVERTISED_Asym_Pause;
10935 }
10936 }
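	/* The block above encodes the usual 802.3 pause-advertisement
	 * mapping:
	 *
	 *	RX && TX  ->  Pause
	 *	RX only   ->  Pause | Asym_Pause
	 *	TX only   ->  Asym_Pause
	 */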
f4a46d1f 10937 if (netif_running(dev) && tp->link_up) {
70739497 10938 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
1da177e4 10939 cmd->duplex = tp->link_config.active_duplex;
859edb26 10940 cmd->lp_advertising = tp->link_config.rmt_adv;
e348c5e7
MC
10941 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10942 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
10943 cmd->eth_tp_mdix = ETH_TP_MDI_X;
10944 else
10945 cmd->eth_tp_mdix = ETH_TP_MDI;
10946 }
64c22182 10947 } else {
e740522e
MC
10948 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
10949 cmd->duplex = DUPLEX_UNKNOWN;
e348c5e7 10950 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
1da177e4 10951 }
882e9793 10952 cmd->phy_address = tp->phy_addr;
7e5856bd 10953 cmd->transceiver = XCVR_INTERNAL;
1da177e4
LT
10954 cmd->autoneg = tp->link_config.autoneg;
10955 cmd->maxtxpkt = 0;
10956 cmd->maxrxpkt = 0;
10957 return 0;
10958}
6aa20a22 10959
1da177e4
LT
10960static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10961{
10962 struct tg3 *tp = netdev_priv(dev);
25db0338 10963 u32 speed = ethtool_cmd_speed(cmd);
6aa20a22 10964
63c3a66f 10965 if (tg3_flag(tp, USE_PHYLIB)) {
3f0e3ad7 10966 struct phy_device *phydev;
f07e9af3 10967 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
b02fd9e3 10968 return -EAGAIN;
3f0e3ad7
MC
10969 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10970 return phy_ethtool_sset(phydev, cmd);
b02fd9e3
MC
10971 }
10972
7e5856bd
MC
10973 if (cmd->autoneg != AUTONEG_ENABLE &&
10974 cmd->autoneg != AUTONEG_DISABLE)
37ff238d 10975 return -EINVAL;
7e5856bd
MC
10976
10977 if (cmd->autoneg == AUTONEG_DISABLE &&
10978 cmd->duplex != DUPLEX_FULL &&
10979 cmd->duplex != DUPLEX_HALF)
37ff238d 10980 return -EINVAL;
1da177e4 10981
7e5856bd
MC
10982 if (cmd->autoneg == AUTONEG_ENABLE) {
10983 u32 mask = ADVERTISED_Autoneg |
10984 ADVERTISED_Pause |
10985 ADVERTISED_Asym_Pause;
10986
f07e9af3 10987 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
7e5856bd
MC
10988 mask |= ADVERTISED_1000baseT_Half |
10989 ADVERTISED_1000baseT_Full;
10990
f07e9af3 10991 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
7e5856bd
MC
10992 mask |= ADVERTISED_100baseT_Half |
10993 ADVERTISED_100baseT_Full |
10994 ADVERTISED_10baseT_Half |
10995 ADVERTISED_10baseT_Full |
10996 ADVERTISED_TP;
10997 else
10998 mask |= ADVERTISED_FIBRE;
10999
11000 if (cmd->advertising & ~mask)
11001 return -EINVAL;
11002
11003 mask &= (ADVERTISED_1000baseT_Half |
11004 ADVERTISED_1000baseT_Full |
11005 ADVERTISED_100baseT_Half |
11006 ADVERTISED_100baseT_Full |
11007 ADVERTISED_10baseT_Half |
11008 ADVERTISED_10baseT_Full);
11009
11010 cmd->advertising &= mask;
11011 } else {
f07e9af3 11012 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
25db0338 11013 if (speed != SPEED_1000)
7e5856bd
MC
11014 return -EINVAL;
11015
11016 if (cmd->duplex != DUPLEX_FULL)
11017 return -EINVAL;
11018 } else {
25db0338
DD
11019 if (speed != SPEED_100 &&
11020 speed != SPEED_10)
7e5856bd
MC
11021 return -EINVAL;
11022 }
11023 }
11024
f47c11ee 11025 tg3_full_lock(tp, 0);
1da177e4
LT
11026
11027 tp->link_config.autoneg = cmd->autoneg;
11028 if (cmd->autoneg == AUTONEG_ENABLE) {
405d8e5c
AG
11029 tp->link_config.advertising = (cmd->advertising |
11030 ADVERTISED_Autoneg);
e740522e
MC
11031 tp->link_config.speed = SPEED_UNKNOWN;
11032 tp->link_config.duplex = DUPLEX_UNKNOWN;
1da177e4
LT
11033 } else {
11034 tp->link_config.advertising = 0;
25db0338 11035 tp->link_config.speed = speed;
1da177e4 11036 tp->link_config.duplex = cmd->duplex;
b02fd9e3 11037 }
6aa20a22 11038
1da177e4
LT
11039 if (netif_running(dev))
11040 tg3_setup_phy(tp, 1);
11041
f47c11ee 11042 tg3_full_unlock(tp);
6aa20a22 11043
1da177e4
LT
11044 return 0;
11045}
6aa20a22 11046
1da177e4
LT
11047static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11048{
11049 struct tg3 *tp = netdev_priv(dev);
6aa20a22 11050
68aad78c
RJ
11051 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11052 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11053 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11054 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
1da177e4 11055}
6aa20a22 11056
1da177e4
LT
11057static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11058{
11059 struct tg3 *tp = netdev_priv(dev);
6aa20a22 11060
63c3a66f 11061 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
a85feb8c
GZ
11062 wol->supported = WAKE_MAGIC;
11063 else
11064 wol->supported = 0;
1da177e4 11065 wol->wolopts = 0;
63c3a66f 11066 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
1da177e4
LT
11067 wol->wolopts = WAKE_MAGIC;
11068 memset(&wol->sopass, 0, sizeof(wol->sopass));
11069}
6aa20a22 11070
1da177e4
LT
11071static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11072{
11073 struct tg3 *tp = netdev_priv(dev);
12dac075 11074 struct device *dp = &tp->pdev->dev;
6aa20a22 11075
1da177e4
LT
11076 if (wol->wolopts & ~WAKE_MAGIC)
11077 return -EINVAL;
11078 if ((wol->wolopts & WAKE_MAGIC) &&
63c3a66f 11079 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
1da177e4 11080 return -EINVAL;
6aa20a22 11081
f2dc0d18
RW
11082 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11083
f47c11ee 11084 spin_lock_bh(&tp->lock);
f2dc0d18 11085 if (device_may_wakeup(dp))
63c3a66f 11086 tg3_flag_set(tp, WOL_ENABLE);
f2dc0d18 11087 else
63c3a66f 11088 tg3_flag_clear(tp, WOL_ENABLE);
f47c11ee 11089 spin_unlock_bh(&tp->lock);
6aa20a22 11090
1da177e4
LT
11091 return 0;
11092}
6aa20a22 11093
1da177e4
LT
11094static u32 tg3_get_msglevel(struct net_device *dev)
11095{
11096 struct tg3 *tp = netdev_priv(dev);
11097 return tp->msg_enable;
11098}
6aa20a22 11099
1da177e4
LT
11100static void tg3_set_msglevel(struct net_device *dev, u32 value)
11101{
11102 struct tg3 *tp = netdev_priv(dev);
11103 tp->msg_enable = value;
11104}
6aa20a22 11105
1da177e4
LT
11106static int tg3_nway_reset(struct net_device *dev)
11107{
11108 struct tg3 *tp = netdev_priv(dev);
1da177e4 11109 int r;
6aa20a22 11110
1da177e4
LT
11111 if (!netif_running(dev))
11112 return -EAGAIN;
11113
f07e9af3 11114 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
c94e3941
MC
11115 return -EINVAL;
11116
63c3a66f 11117 if (tg3_flag(tp, USE_PHYLIB)) {
f07e9af3 11118 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
b02fd9e3 11119 return -EAGAIN;
3f0e3ad7 11120 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
b02fd9e3
MC
11121 } else {
11122 u32 bmcr;
11123
11124 spin_lock_bh(&tp->lock);
11125 r = -EINVAL;
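		/* BMCR is read twice below; the first result is
		 * discarded, apparently to flush a stale value, and only
		 * the second read's return code and contents are used.
		 */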
11126 tg3_readphy(tp, MII_BMCR, &bmcr);
11127 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
11128 ((bmcr & BMCR_ANENABLE) ||
f07e9af3 11129 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
b02fd9e3
MC
11130 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
11131 BMCR_ANENABLE);
11132 r = 0;
11133 }
11134 spin_unlock_bh(&tp->lock);
1da177e4 11135 }
6aa20a22 11136
1da177e4
LT
11137 return r;
11138}
6aa20a22 11139
1da177e4
LT
11140static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11141{
11142 struct tg3 *tp = netdev_priv(dev);
6aa20a22 11143
2c49a44d 11144 ering->rx_max_pending = tp->rx_std_ring_mask;
63c3a66f 11145 if (tg3_flag(tp, JUMBO_RING_ENABLE))
2c49a44d 11146 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
4f81c32b
MC
11147 else
11148 ering->rx_jumbo_max_pending = 0;
11149
11150 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
1da177e4
LT
11151
11152 ering->rx_pending = tp->rx_pending;
63c3a66f 11153 if (tg3_flag(tp, JUMBO_RING_ENABLE))
4f81c32b
MC
11154 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11155 else
11156 ering->rx_jumbo_pending = 0;
11157
f3f3f27e 11158 ering->tx_pending = tp->napi[0].tx_pending;
1da177e4 11159}
6aa20a22 11160
1da177e4
LT
11161static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11162{
11163 struct tg3 *tp = netdev_priv(dev);
646c9edd 11164 int i, irq_sync = 0, err = 0;
6aa20a22 11165
2c49a44d
MC
11166 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
11167 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
bc3a9254
MC
11168 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11169 (ering->tx_pending <= MAX_SKB_FRAGS) ||
63c3a66f 11170 (tg3_flag(tp, TSO_BUG) &&
bc3a9254 11171 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
1da177e4 11172 return -EINVAL;
6aa20a22 11173
bbe832c0 11174 if (netif_running(dev)) {
b02fd9e3 11175 tg3_phy_stop(tp);
1da177e4 11176 tg3_netif_stop(tp);
bbe832c0
MC
11177 irq_sync = 1;
11178 }
1da177e4 11179
bbe832c0 11180 tg3_full_lock(tp, irq_sync);
6aa20a22 11181
1da177e4
LT
11182 tp->rx_pending = ering->rx_pending;
11183
63c3a66f 11184 if (tg3_flag(tp, MAX_RXPEND_64) &&
1da177e4
LT
11185 tp->rx_pending > 63)
11186 tp->rx_pending = 63;
11187 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
646c9edd 11188
6fd45cb8 11189 for (i = 0; i < tp->irq_max; i++)
646c9edd 11190 tp->napi[i].tx_pending = ering->tx_pending;
1da177e4
LT
11191
11192 if (netif_running(dev)) {
944d980e 11193 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
b9ec6c1b
MC
11194 err = tg3_restart_hw(tp, 1);
11195 if (!err)
11196 tg3_netif_start(tp);
1da177e4
LT
11197 }
11198
f47c11ee 11199 tg3_full_unlock(tp);
6aa20a22 11200
b02fd9e3
MC
11201 if (irq_sync && !err)
11202 tg3_phy_start(tp);
11203
b9ec6c1b 11204 return err;
1da177e4 11205}
6aa20a22 11206
1da177e4
LT
11207static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11208{
11209 struct tg3 *tp = netdev_priv(dev);
6aa20a22 11210
63c3a66f 11211 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
8d018621 11212
4a2db503 11213 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
8d018621
MC
11214 epause->rx_pause = 1;
11215 else
11216 epause->rx_pause = 0;
11217
4a2db503 11218 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
8d018621
MC
11219 epause->tx_pause = 1;
11220 else
11221 epause->tx_pause = 0;
1da177e4 11222}
6aa20a22 11223
1da177e4
LT
11224static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11225{
11226 struct tg3 *tp = netdev_priv(dev);
b02fd9e3 11227 int err = 0;
6aa20a22 11228
63c3a66f 11229 if (tg3_flag(tp, USE_PHYLIB)) {
2712168f
MC
11230 u32 newadv;
11231 struct phy_device *phydev;
1da177e4 11232
2712168f 11233 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
f47c11ee 11234
2712168f
MC
11235 if (!(phydev->supported & SUPPORTED_Pause) ||
11236 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
2259dca3 11237 (epause->rx_pause != epause->tx_pause)))
2712168f 11238 return -EINVAL;
1da177e4 11239
2712168f
MC
11240 tp->link_config.flowctrl = 0;
11241 if (epause->rx_pause) {
11242 tp->link_config.flowctrl |= FLOW_CTRL_RX;
11243
11244 if (epause->tx_pause) {
11245 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11246 newadv = ADVERTISED_Pause;
b02fd9e3 11247 } else
2712168f
MC
11248 newadv = ADVERTISED_Pause |
11249 ADVERTISED_Asym_Pause;
11250 } else if (epause->tx_pause) {
11251 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11252 newadv = ADVERTISED_Asym_Pause;
11253 } else
11254 newadv = 0;
11255
11256 if (epause->autoneg)
63c3a66f 11257 tg3_flag_set(tp, PAUSE_AUTONEG);
2712168f 11258 else
63c3a66f 11259 tg3_flag_clear(tp, PAUSE_AUTONEG);
2712168f 11260
f07e9af3 11261 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2712168f
MC
11262 u32 oldadv = phydev->advertising &
11263 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
11264 if (oldadv != newadv) {
11265 phydev->advertising &=
11266 ~(ADVERTISED_Pause |
11267 ADVERTISED_Asym_Pause);
11268 phydev->advertising |= newadv;
11269 if (phydev->autoneg) {
11270 /*
11271 * Always renegotiate the link to
11272 * inform our link partner of our
11273 * flow control settings, even if the
11274 * flow control is forced. Let
11275 * tg3_adjust_link() do the final
11276 * flow control setup.
11277 */
11278 return phy_start_aneg(phydev);
b02fd9e3 11279 }
b02fd9e3 11280 }
b02fd9e3 11281
2712168f 11282 if (!epause->autoneg)
b02fd9e3 11283 tg3_setup_flow_control(tp, 0, 0);
2712168f 11284 } else {
c6700ce2 11285 tp->link_config.advertising &=
2712168f
MC
11286 ~(ADVERTISED_Pause |
11287 ADVERTISED_Asym_Pause);
c6700ce2 11288 tp->link_config.advertising |= newadv;
b02fd9e3
MC
11289 }
11290 } else {
11291 int irq_sync = 0;
11292
11293 if (netif_running(dev)) {
11294 tg3_netif_stop(tp);
11295 irq_sync = 1;
11296 }
11297
11298 tg3_full_lock(tp, irq_sync);
11299
11300 if (epause->autoneg)
63c3a66f 11301 tg3_flag_set(tp, PAUSE_AUTONEG);
b02fd9e3 11302 else
63c3a66f 11303 tg3_flag_clear(tp, PAUSE_AUTONEG);
b02fd9e3 11304 if (epause->rx_pause)
e18ce346 11305 tp->link_config.flowctrl |= FLOW_CTRL_RX;
b02fd9e3 11306 else
e18ce346 11307 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
b02fd9e3 11308 if (epause->tx_pause)
e18ce346 11309 tp->link_config.flowctrl |= FLOW_CTRL_TX;
b02fd9e3 11310 else
e18ce346 11311 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
b02fd9e3
MC
11312
11313 if (netif_running(dev)) {
11314 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11315 err = tg3_restart_hw(tp, 1);
11316 if (!err)
11317 tg3_netif_start(tp);
11318 }
11319
11320 tg3_full_unlock(tp);
11321 }
6aa20a22 11322
b9ec6c1b 11323 return err;
1da177e4 11324}
6aa20a22 11325
de6f31eb 11326static int tg3_get_sset_count(struct net_device *dev, int sset)
1da177e4 11327{
b9f2c044
JG
11328 switch (sset) {
11329 case ETH_SS_TEST:
11330 return TG3_NUM_TEST;
11331 case ETH_SS_STATS:
11332 return TG3_NUM_STATS;
11333 default:
11334 return -EOPNOTSUPP;
11335 }
4cafd3f5
MC
11336}
11337
90415477
MC
11338static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11339 u32 *rules __always_unused)
11340{
11341 struct tg3 *tp = netdev_priv(dev);
11342
11343 if (!tg3_flag(tp, SUPPORT_MSIX))
11344 return -EOPNOTSUPP;
11345
11346 switch (info->cmd) {
11347 case ETHTOOL_GRXRINGS:
11348 if (netif_running(tp->dev))
9102426a 11349 info->data = tp->rxq_cnt;
90415477
MC
11350 else {
11351 info->data = num_online_cpus();
9102426a
MC
11352 if (info->data > TG3_RSS_MAX_NUM_QS)
11353 info->data = TG3_RSS_MAX_NUM_QS;
90415477
MC
11354 }
11355
11356 /* The first interrupt vector only
11357 * handles link interrupts.
11358 */
11359 info->data -= 1;
11360 return 0;
11361
11362 default:
11363 return -EOPNOTSUPP;
11364 }
11365}
11366
11367static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11368{
11369 u32 size = 0;
11370 struct tg3 *tp = netdev_priv(dev);
11371
11372 if (tg3_flag(tp, SUPPORT_MSIX))
11373 size = TG3_RSS_INDIR_TBL_SIZE;
11374
11375 return size;
11376}
11377
11378static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11379{
11380 struct tg3 *tp = netdev_priv(dev);
11381 int i;
11382
11383 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11384 indir[i] = tp->rss_ind_tbl[i];
11385
11386 return 0;
11387}
11388
11389static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11390{
11391 struct tg3 *tp = netdev_priv(dev);
11392 size_t i;
11393
11394 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11395 tp->rss_ind_tbl[i] = indir[i];
11396
11397 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11398 return 0;
11399
11400 /* It is legal to write the indirection
11401 * table while the device is running.
11402 */
11403 tg3_full_lock(tp, 0);
11404 tg3_rss_write_indir_tbl(tp);
11405 tg3_full_unlock(tp);
11406
11407 return 0;
11408}
11409
0968169c
MC
11410static void tg3_get_channels(struct net_device *dev,
11411 struct ethtool_channels *channel)
11412{
11413 struct tg3 *tp = netdev_priv(dev);
11414 u32 deflt_qs = netif_get_num_default_rss_queues();
11415
11416 channel->max_rx = tp->rxq_max;
11417 channel->max_tx = tp->txq_max;
11418
11419 if (netif_running(dev)) {
11420 channel->rx_count = tp->rxq_cnt;
11421 channel->tx_count = tp->txq_cnt;
11422 } else {
11423 if (tp->rxq_req)
11424 channel->rx_count = tp->rxq_req;
11425 else
11426 channel->rx_count = min(deflt_qs, tp->rxq_max);
11427
11428 if (tp->txq_req)
11429 channel->tx_count = tp->txq_req;
11430 else
11431 channel->tx_count = min(deflt_qs, tp->txq_max);
11432 }
11433}
11434
11435static int tg3_set_channels(struct net_device *dev,
11436 struct ethtool_channels *channel)
11437{
11438 struct tg3 *tp = netdev_priv(dev);
11439
11440 if (!tg3_flag(tp, SUPPORT_MSIX))
11441 return -EOPNOTSUPP;
11442
11443 if (channel->rx_count > tp->rxq_max ||
11444 channel->tx_count > tp->txq_max)
11445 return -EINVAL;
11446
11447 tp->rxq_req = channel->rx_count;
11448 tp->txq_req = channel->tx_count;
11449
11450 if (!netif_running(dev))
11451 return 0;
11452
11453 tg3_stop(tp);
11454
f4a46d1f 11455 tg3_carrier_off(tp);
0968169c
MC
11456
11457 tg3_start(tp, true, false);
11458
11459 return 0;
11460}
11461
de6f31eb 11462static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
1da177e4
LT
11463{
11464 switch (stringset) {
11465 case ETH_SS_STATS:
11466 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11467 break;
4cafd3f5
MC
11468 case ETH_SS_TEST:
11469 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11470 break;
1da177e4
LT
11471 default:
 11472		WARN_ON(1); /* unknown string set; should not be reached */
11473 break;
11474 }
11475}
11476
81b8709c 11477static int tg3_set_phys_id(struct net_device *dev,
11478 enum ethtool_phys_id_state state)
4009a93d
MC
11479{
11480 struct tg3 *tp = netdev_priv(dev);
4009a93d
MC
11481
11482 if (!netif_running(tp->dev))
11483 return -EAGAIN;
11484
81b8709c 11485 switch (state) {
11486 case ETHTOOL_ID_ACTIVE:
fce55922 11487 return 1; /* cycle on/off once per second */
4009a93d 11488
81b8709c 11489 case ETHTOOL_ID_ON:
11490 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11491 LED_CTRL_1000MBPS_ON |
11492 LED_CTRL_100MBPS_ON |
11493 LED_CTRL_10MBPS_ON |
11494 LED_CTRL_TRAFFIC_OVERRIDE |
11495 LED_CTRL_TRAFFIC_BLINK |
11496 LED_CTRL_TRAFFIC_LED);
11497 break;
6aa20a22 11498
81b8709c 11499 case ETHTOOL_ID_OFF:
11500 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11501 LED_CTRL_TRAFFIC_OVERRIDE);
11502 break;
4009a93d 11503
81b8709c 11504 case ETHTOOL_ID_INACTIVE:
11505 tw32(MAC_LED_CTRL, tp->led_ctrl);
11506 break;
4009a93d 11507 }
81b8709c 11508
4009a93d
MC
11509 return 0;
11510}
11511
de6f31eb 11512static void tg3_get_ethtool_stats(struct net_device *dev,
1da177e4
LT
11513 struct ethtool_stats *estats, u64 *tmp_stats)
11514{
11515 struct tg3 *tp = netdev_priv(dev);
0e6c9da3 11516
b546e46f
MC
11517 if (tp->hw_stats)
11518 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11519 else
11520 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
1da177e4
LT
11521}
11522
535a490e 11523static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
c3e94500
MC
11524{
11525 int i;
11526 __be32 *buf;
11527 u32 offset = 0, len = 0;
11528 u32 magic, val;
11529
63c3a66f 11530 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
c3e94500
MC
11531 return NULL;
11532
11533 if (magic == TG3_EEPROM_MAGIC) {
11534 for (offset = TG3_NVM_DIR_START;
11535 offset < TG3_NVM_DIR_END;
11536 offset += TG3_NVM_DIRENT_SIZE) {
11537 if (tg3_nvram_read(tp, offset, &val))
11538 return NULL;
11539
11540 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11541 TG3_NVM_DIRTYPE_EXTVPD)
11542 break;
11543 }
11544
11545 if (offset != TG3_NVM_DIR_END) {
11546 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11547 if (tg3_nvram_read(tp, offset + 4, &offset))
11548 return NULL;
11549
11550 offset = tg3_nvram_logical_addr(tp, offset);
11551 }
11552 }
11553
11554 if (!offset || !len) {
11555 offset = TG3_NVM_VPD_OFF;
11556 len = TG3_NVM_VPD_LEN;
11557 }
11558
11559 buf = kmalloc(len, GFP_KERNEL);
11560 if (buf == NULL)
11561 return NULL;
11562
11563 if (magic == TG3_EEPROM_MAGIC) {
11564 for (i = 0; i < len; i += 4) {
11565 /* The data is in little-endian format in NVRAM.
11566 * Use the big-endian read routines to preserve
11567 * the byte order as it exists in NVRAM.
11568 */
11569 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11570 goto error;
11571 }
11572 } else {
11573 u8 *ptr;
11574 ssize_t cnt;
11575 unsigned int pos = 0;
11576
11577 ptr = (u8 *)&buf[0];
11578 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11579 cnt = pci_read_vpd(tp->pdev, pos,
11580 len - pos, ptr);
11581 if (cnt == -ETIMEDOUT || cnt == -EINTR)
11582 cnt = 0;
11583 else if (cnt < 0)
11584 goto error;
11585 }
11586 if (pos != len)
11587 goto error;
11588 }
11589
535a490e
MC
11590 *vpdlen = len;
11591
c3e94500
MC
11592 return buf;
11593
11594error:
11595 kfree(buf);
11596 return NULL;
11597}
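/* Lookup order in the function above: prefer an extended-VPD block
 * named in the NVM directory, fall back to the fixed TG3_NVM_VPD_OFF
 * window, and fetch the data through the NVRAM interface for
 * EEPROM-style parts or through PCI VPD space otherwise.
 */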
11598
566f86ad 11599#define NVRAM_TEST_SIZE 0x100
a5767dec
MC
11600#define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
11601#define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
11602#define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
727a6d9f
MC
11603#define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
11604#define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
bda18faf 11605#define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
b16250e3
MC
11606#define NVRAM_SELFBOOT_HW_SIZE 0x20
11607#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
566f86ad
MC
11608
11609static int tg3_test_nvram(struct tg3 *tp)
11610{
535a490e 11611 u32 csum, magic, len;
a9dc529d 11612 __be32 *buf;
ab0049b4 11613 int i, j, k, err = 0, size;
566f86ad 11614
63c3a66f 11615 if (tg3_flag(tp, NO_NVRAM))
df259d8c
MC
11616 return 0;
11617
e4f34110 11618 if (tg3_nvram_read(tp, 0, &magic) != 0)
1b27777a
MC
11619 return -EIO;
11620
1b27777a
MC
11621 if (magic == TG3_EEPROM_MAGIC)
11622 size = NVRAM_TEST_SIZE;
b16250e3 11623 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
a5767dec
MC
11624 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
11625 TG3_EEPROM_SB_FORMAT_1) {
11626 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
11627 case TG3_EEPROM_SB_REVISION_0:
11628 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
11629 break;
11630 case TG3_EEPROM_SB_REVISION_2:
11631 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
11632 break;
11633 case TG3_EEPROM_SB_REVISION_3:
11634 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
11635 break;
727a6d9f
MC
11636 case TG3_EEPROM_SB_REVISION_4:
11637 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
11638 break;
11639 case TG3_EEPROM_SB_REVISION_5:
11640 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
11641 break;
11642 case TG3_EEPROM_SB_REVISION_6:
11643 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
11644 break;
a5767dec 11645 default:
727a6d9f 11646 return -EIO;
a5767dec
MC
11647 }
11648 } else
1b27777a 11649 return 0;
b16250e3
MC
11650 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
11651 size = NVRAM_SELFBOOT_HW_SIZE;
11652 else
1b27777a
MC
11653 return -EIO;
11654
11655 buf = kmalloc(size, GFP_KERNEL);
566f86ad
MC
11656 if (buf == NULL)
11657 return -ENOMEM;
11658
1b27777a
MC
11659 err = -EIO;
11660 for (i = 0, j = 0; i < size; i += 4, j++) {
a9dc529d
MC
11661 err = tg3_nvram_read_be32(tp, i, &buf[j]);
11662 if (err)
566f86ad 11663 break;
566f86ad 11664 }
1b27777a 11665 if (i < size)
566f86ad
MC
11666 goto out;
11667
1b27777a 11668 /* Selfboot format */
a9dc529d 11669 magic = be32_to_cpu(buf[0]);
b9fc7dc5 11670 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
b16250e3 11671 TG3_EEPROM_MAGIC_FW) {
1b27777a
MC
11672 u8 *buf8 = (u8 *) buf, csum8 = 0;
11673
b9fc7dc5 11674 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
a5767dec
MC
11675 TG3_EEPROM_SB_REVISION_2) {
11676 /* For rev 2, the csum doesn't include the MBA. */
11677 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11678 csum8 += buf8[i];
11679 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11680 csum8 += buf8[i];
11681 } else {
11682 for (i = 0; i < size; i++)
11683 csum8 += buf8[i];
11684 }
1b27777a 11685
ad96b485
AB
11686 if (csum8 == 0) {
11687 err = 0;
11688 goto out;
11689 }
11690
11691 err = -EIO;
11692 goto out;
1b27777a 11693 }
566f86ad 11694
b9fc7dc5 11695 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
b16250e3
MC
11696 TG3_EEPROM_MAGIC_HW) {
11697 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
a9dc529d 11698 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
b16250e3 11699 u8 *buf8 = (u8 *) buf;
b16250e3
MC
11700
11701 /* Separate the parity bits and the data bytes. */
11702 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11703 if ((i == 0) || (i == 8)) {
11704 int l;
11705 u8 msk;
11706
11707 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11708 parity[k++] = buf8[i] & msk;
11709 i++;
859a5887 11710 } else if (i == 16) {
b16250e3
MC
11711 int l;
11712 u8 msk;
11713
11714 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11715 parity[k++] = buf8[i] & msk;
11716 i++;
11717
11718 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11719 parity[k++] = buf8[i] & msk;
11720 i++;
11721 }
11722 data[j++] = buf8[i];
11723 }
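		/* The loop above gathers 7 + 7 + 6 + 8 = 28 parity bits
		 * (from bytes 0, 8, 16 and 17) to cover the 28 data
		 * bytes; the check below requires each data byte plus
		 * its parity bit to have odd combined weight.
		 */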
11724
11725 err = -EIO;
11726 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11727 u8 hw8 = hweight8(data[i]);
11728
11729 if ((hw8 & 0x1) && parity[i])
11730 goto out;
11731 else if (!(hw8 & 0x1) && !parity[i])
11732 goto out;
11733 }
11734 err = 0;
11735 goto out;
11736 }
11737
01c3a392
MC
11738 err = -EIO;
11739
566f86ad
MC
11740 /* Bootstrap checksum at offset 0x10 */
11741 csum = calc_crc((unsigned char *) buf, 0x10);
01c3a392 11742 if (csum != le32_to_cpu(buf[0x10/4]))
566f86ad
MC
11743 goto out;
11744
11745 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11746 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
01c3a392 11747 if (csum != le32_to_cpu(buf[0xfc/4]))
a9dc529d 11748 goto out;
566f86ad 11749
c3e94500
MC
11750 kfree(buf);
11751
535a490e 11752 buf = tg3_vpd_readblock(tp, &len);
c3e94500
MC
11753 if (!buf)
11754 return -ENOMEM;
d4894f3e 11755
535a490e 11756 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
d4894f3e
MC
11757 if (i > 0) {
11758 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11759 if (j < 0)
11760 goto out;
11761
535a490e 11762 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
d4894f3e
MC
11763 goto out;
11764
11765 i += PCI_VPD_LRDT_TAG_SIZE;
11766 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11767 PCI_VPD_RO_KEYWORD_CHKSUM);
11768 if (j > 0) {
11769 u8 csum8 = 0;
11770
11771 j += PCI_VPD_INFO_FLD_HDR_SIZE;
11772
11773 for (i = 0; i <= j; i++)
11774 csum8 += ((u8 *)buf)[i];
11775
11776 if (csum8)
11777 goto out;
11778 }
11779 }
11780
566f86ad
MC
11781 err = 0;
11782
11783out:
11784 kfree(buf);
11785 return err;
11786}
11787
ca43007a
MC
11788#define TG3_SERDES_TIMEOUT_SEC 2
11789#define TG3_COPPER_TIMEOUT_SEC 6
11790
11791static int tg3_test_link(struct tg3 *tp)
11792{
11793 int i, max;
11794
11795 if (!netif_running(tp->dev))
11796 return -ENODEV;
11797
f07e9af3 11798 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
ca43007a
MC
11799 max = TG3_SERDES_TIMEOUT_SEC;
11800 else
11801 max = TG3_COPPER_TIMEOUT_SEC;
11802
11803 for (i = 0; i < max; i++) {
f4a46d1f 11804 if (tp->link_up)
ca43007a
MC
11805 return 0;
11806
11807 if (msleep_interruptible(1000))
11808 break;
11809 }
11810
11811 return -EIO;
11812}
11813
a71116d1 11814/* Only test the commonly used registers */
30ca3e37 11815static int tg3_test_registers(struct tg3 *tp)
a71116d1 11816{
b16250e3 11817 int i, is_5705, is_5750;
a71116d1
MC
11818 u32 offset, read_mask, write_mask, val, save_val, read_val;
11819 static struct {
11820 u16 offset;
11821 u16 flags;
11822#define TG3_FL_5705 0x1
11823#define TG3_FL_NOT_5705 0x2
11824#define TG3_FL_NOT_5788 0x4
b16250e3 11825#define TG3_FL_NOT_5750 0x8
a71116d1
MC
11826 u32 read_mask;
11827 u32 write_mask;
11828 } reg_tbl[] = {
11829 /* MAC Control Registers */
11830 { MAC_MODE, TG3_FL_NOT_5705,
11831 0x00000000, 0x00ef6f8c },
11832 { MAC_MODE, TG3_FL_5705,
11833 0x00000000, 0x01ef6b8c },
11834 { MAC_STATUS, TG3_FL_NOT_5705,
11835 0x03800107, 0x00000000 },
11836 { MAC_STATUS, TG3_FL_5705,
11837 0x03800100, 0x00000000 },
11838 { MAC_ADDR_0_HIGH, 0x0000,
11839 0x00000000, 0x0000ffff },
11840 { MAC_ADDR_0_LOW, 0x0000,
c6cdf436 11841 0x00000000, 0xffffffff },
a71116d1
MC
11842 { MAC_RX_MTU_SIZE, 0x0000,
11843 0x00000000, 0x0000ffff },
11844 { MAC_TX_MODE, 0x0000,
11845 0x00000000, 0x00000070 },
11846 { MAC_TX_LENGTHS, 0x0000,
11847 0x00000000, 0x00003fff },
11848 { MAC_RX_MODE, TG3_FL_NOT_5705,
11849 0x00000000, 0x000007fc },
11850 { MAC_RX_MODE, TG3_FL_5705,
11851 0x00000000, 0x000007dc },
11852 { MAC_HASH_REG_0, 0x0000,
11853 0x00000000, 0xffffffff },
11854 { MAC_HASH_REG_1, 0x0000,
11855 0x00000000, 0xffffffff },
11856 { MAC_HASH_REG_2, 0x0000,
11857 0x00000000, 0xffffffff },
11858 { MAC_HASH_REG_3, 0x0000,
11859 0x00000000, 0xffffffff },
11860
11861 /* Receive Data and Receive BD Initiator Control Registers. */
11862 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11863 0x00000000, 0xffffffff },
11864 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11865 0x00000000, 0xffffffff },
11866 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11867 0x00000000, 0x00000003 },
11868 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11869 0x00000000, 0xffffffff },
11870 { RCVDBDI_STD_BD+0, 0x0000,
11871 0x00000000, 0xffffffff },
11872 { RCVDBDI_STD_BD+4, 0x0000,
11873 0x00000000, 0xffffffff },
11874 { RCVDBDI_STD_BD+8, 0x0000,
11875 0x00000000, 0xffff0002 },
11876 { RCVDBDI_STD_BD+0xc, 0x0000,
11877 0x00000000, 0xffffffff },
6aa20a22 11878
a71116d1
MC
11879 /* Receive BD Initiator Control Registers. */
11880 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11881 0x00000000, 0xffffffff },
11882 { RCVBDI_STD_THRESH, TG3_FL_5705,
11883 0x00000000, 0x000003ff },
11884 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11885 0x00000000, 0xffffffff },
6aa20a22 11886
a71116d1
MC
11887 /* Host Coalescing Control Registers. */
11888 { HOSTCC_MODE, TG3_FL_NOT_5705,
11889 0x00000000, 0x00000004 },
11890 { HOSTCC_MODE, TG3_FL_5705,
11891 0x00000000, 0x000000f6 },
11892 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11893 0x00000000, 0xffffffff },
11894 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11895 0x00000000, 0x000003ff },
11896 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11897 0x00000000, 0xffffffff },
11898 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11899 0x00000000, 0x000003ff },
11900 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11901 0x00000000, 0xffffffff },
11902 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11903 0x00000000, 0x000000ff },
11904 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11905 0x00000000, 0xffffffff },
11906 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11907 0x00000000, 0x000000ff },
11908 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11909 0x00000000, 0xffffffff },
11910 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11911 0x00000000, 0xffffffff },
11912 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11913 0x00000000, 0xffffffff },
11914 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11915 0x00000000, 0x000000ff },
11916 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11917 0x00000000, 0xffffffff },
11918 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11919 0x00000000, 0x000000ff },
11920 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11921 0x00000000, 0xffffffff },
11922 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11923 0x00000000, 0xffffffff },
11924 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11925 0x00000000, 0xffffffff },
11926 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11927 0x00000000, 0xffffffff },
11928 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11929 0x00000000, 0xffffffff },
11930 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11931 0xffffffff, 0x00000000 },
11932 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11933 0xffffffff, 0x00000000 },
11934
11935 /* Buffer Manager Control Registers. */
b16250e3 11936 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
a71116d1 11937 0x00000000, 0x007fff80 },
b16250e3 11938 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
a71116d1
MC
11939 0x00000000, 0x007fffff },
11940 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11941 0x00000000, 0x0000003f },
11942 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11943 0x00000000, 0x000001ff },
11944 { BUFMGR_MB_HIGH_WATER, 0x0000,
11945 0x00000000, 0x000001ff },
11946 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11947 0xffffffff, 0x00000000 },
11948 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11949 0xffffffff, 0x00000000 },
6aa20a22 11950
a71116d1
MC
11951 /* Mailbox Registers */
11952 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11953 0x00000000, 0x000001ff },
11954 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11955 0x00000000, 0x000001ff },
11956 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11957 0x00000000, 0x000007ff },
11958 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11959 0x00000000, 0x000001ff },
11960
11961 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11962 };
11963
b16250e3 11964 is_5705 = is_5750 = 0;
63c3a66f 11965 if (tg3_flag(tp, 5705_PLUS)) {
a71116d1 11966 is_5705 = 1;
63c3a66f 11967 if (tg3_flag(tp, 5750_PLUS))
b16250e3
MC
11968 is_5750 = 1;
11969 }
a71116d1
MC
11970
11971 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11972 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11973 continue;
11974
11975 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11976 continue;
11977
63c3a66f 11978 if (tg3_flag(tp, IS_5788) &&
a71116d1
MC
11979 (reg_tbl[i].flags & TG3_FL_NOT_5788))
11980 continue;
11981
b16250e3
MC
11982 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11983 continue;
11984
a71116d1
MC
11985 offset = (u32) reg_tbl[i].offset;
11986 read_mask = reg_tbl[i].read_mask;
11987 write_mask = reg_tbl[i].write_mask;
11988
11989 /* Save the original register content */
11990 save_val = tr32(offset);
11991
11992 /* Determine the read-only value. */
11993 read_val = save_val & read_mask;
11994
11995 /* Write zero to the register, then make sure the read-only bits
11996 * are not changed and the read/write bits are all zeros.
11997 */
11998 tw32(offset, 0);
11999
12000 val = tr32(offset);
12001
12002 /* Test the read-only and read/write bits. */
12003 if (((val & read_mask) != read_val) || (val & write_mask))
12004 goto out;
12005
12006 /* Write ones to all the bits defined by RdMask and WrMask, then
12007 * make sure the read-only bits are not changed and the
12008 * read/write bits are all ones.
12009 */
12010 tw32(offset, read_mask | write_mask);
12011
12012 val = tr32(offset);
12013
12014 /* Test the read-only bits. */
12015 if ((val & read_mask) != read_val)
12016 goto out;
12017
12018 /* Test the read/write bits. */
12019 if ((val & write_mask) != write_mask)
12020 goto out;
12021
12022 tw32(offset, save_val);
12023 }
12024
12025 return 0;
12026
12027out:
9f88f29f 12028 if (netif_msg_hw(tp))
2445e461
MC
12029 netdev_err(tp->dev,
12030 "Register test failed at offset %x\n", offset);
a71116d1
MC
12031 tw32(offset, save_val);
12032 return -EIO;
12033}
12034
7942e1db
MC
12035static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12036{
f71e1309 12037 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
7942e1db
MC
12038 int i;
12039 u32 j;
12040
e9edda69 12041 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
7942e1db
MC
12042 for (j = 0; j < len; j += 4) {
12043 u32 val;
12044
12045 tg3_write_mem(tp, offset + j, test_pattern[i]);
12046 tg3_read_mem(tp, offset + j, &val);
12047 if (val != test_pattern[i])
12048 return -EIO;
12049 }
12050 }
12051 return 0;
12052}
12053
12054static int tg3_test_memory(struct tg3 *tp)
12055{
12056 static struct mem_entry {
12057 u32 offset;
12058 u32 len;
12059 } mem_tbl_570x[] = {
38690194 12060 { 0x00000000, 0x00b50},
7942e1db
MC
12061 { 0x00002000, 0x1c000},
12062 { 0xffffffff, 0x00000}
12063 }, mem_tbl_5705[] = {
12064 { 0x00000100, 0x0000c},
12065 { 0x00000200, 0x00008},
7942e1db
MC
12066 { 0x00004000, 0x00800},
12067 { 0x00006000, 0x01000},
12068 { 0x00008000, 0x02000},
12069 { 0x00010000, 0x0e000},
12070 { 0xffffffff, 0x00000}
79f4d13a
MC
12071 }, mem_tbl_5755[] = {
12072 { 0x00000200, 0x00008},
12073 { 0x00004000, 0x00800},
12074 { 0x00006000, 0x00800},
12075 { 0x00008000, 0x02000},
12076 { 0x00010000, 0x0c000},
12077 { 0xffffffff, 0x00000}
b16250e3
MC
12078 }, mem_tbl_5906[] = {
12079 { 0x00000200, 0x00008},
12080 { 0x00004000, 0x00400},
12081 { 0x00006000, 0x00400},
12082 { 0x00008000, 0x01000},
12083 { 0x00010000, 0x01000},
12084 { 0xffffffff, 0x00000}
8b5a6c42
MC
12085 }, mem_tbl_5717[] = {
12086 { 0x00000200, 0x00008},
12087 { 0x00010000, 0x0a000},
12088 { 0x00020000, 0x13c00},
12089 { 0xffffffff, 0x00000}
12090 }, mem_tbl_57765[] = {
12091 { 0x00000200, 0x00008},
12092 { 0x00004000, 0x00800},
12093 { 0x00006000, 0x09800},
12094 { 0x00010000, 0x0a000},
12095 { 0xffffffff, 0x00000}
7942e1db
MC
12096 };
12097 struct mem_entry *mem_tbl;
12098 int err = 0;
12099 int i;
12100
63c3a66f 12101 if (tg3_flag(tp, 5717_PLUS))
8b5a6c42 12102 mem_tbl = mem_tbl_5717;
55086ad9 12103 else if (tg3_flag(tp, 57765_CLASS))
8b5a6c42 12104 mem_tbl = mem_tbl_57765;
63c3a66f 12105 else if (tg3_flag(tp, 5755_PLUS))
321d32a0
MC
12106 mem_tbl = mem_tbl_5755;
12107 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12108 mem_tbl = mem_tbl_5906;
63c3a66f 12109 else if (tg3_flag(tp, 5705_PLUS))
321d32a0
MC
12110 mem_tbl = mem_tbl_5705;
12111 else
7942e1db
MC
12112 mem_tbl = mem_tbl_570x;
12113
12114 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
be98da6a
MC
12115 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
12116 if (err)
7942e1db
MC
12117 break;
12118 }
6aa20a22 12119
7942e1db
MC
12120 return err;
12121}
12122
bb158d69
MC
12123#define TG3_TSO_MSS 500
12124
12125#define TG3_TSO_IP_HDR_LEN 20
12126#define TG3_TSO_TCP_HDR_LEN 20
12127#define TG3_TSO_TCP_OPT_LEN 12
12128
12129static const u8 tg3_tso_header[] = {
121300x08, 0x00,
121310x45, 0x00, 0x00, 0x00,
121320x00, 0x00, 0x40, 0x00,
121330x40, 0x06, 0x00, 0x00,
121340x0a, 0x00, 0x00, 0x01,
121350x0a, 0x00, 0x00, 0x02,
121360x0d, 0x00, 0xe0, 0x00,
121370x00, 0x00, 0x01, 0x00,
121380x00, 0x00, 0x02, 0x00,
121390x80, 0x10, 0x10, 0x00,
121400x14, 0x09, 0x00, 0x00,
121410x01, 0x01, 0x08, 0x0a,
121420x11, 0x11, 0x11, 0x11,
121430x11, 0x11, 0x11, 0x11,
12144};
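/* Decoded, the template above is an Ethertype (0x0800, IPv4) followed
 * by a 20-byte IPv4 header (10.0.0.1 -> 10.0.0.2) and a 20-byte TCP
 * header carrying 12 bytes of options (NOP, NOP, timestamp), which is
 * exactly the layout the TG3_TSO_*_LEN constants describe.
 */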
9f40dead 12145
28a45957 12146static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
c76949a6 12147{
5e5a7f37 12148 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
bb158d69 12149 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
84b67b27 12150 u32 budget;
9205fd9c
ED
12151 struct sk_buff *skb;
12152 u8 *tx_data, *rx_data;
c76949a6
MC
12153 dma_addr_t map;
12154 int num_pkts, tx_len, rx_len, i, err;
12155 struct tg3_rx_buffer_desc *desc;
898a56f8 12156 struct tg3_napi *tnapi, *rnapi;
8fea32b9 12157 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
c76949a6 12158
c8873405
MC
12159 tnapi = &tp->napi[0];
12160 rnapi = &tp->napi[0];
0c1d0e2b 12161 if (tp->irq_cnt > 1) {
63c3a66f 12162 if (tg3_flag(tp, ENABLE_RSS))
1da85aa3 12163 rnapi = &tp->napi[1];
63c3a66f 12164 if (tg3_flag(tp, ENABLE_TSS))
c8873405 12165 tnapi = &tp->napi[1];
0c1d0e2b 12166 }
fd2ce37f 12167 coal_now = tnapi->coal_now | rnapi->coal_now;
898a56f8 12168
c76949a6
MC
12169 err = -EIO;
12170
4852a861 12171 tx_len = pktsz;
a20e9c62 12172 skb = netdev_alloc_skb(tp->dev, tx_len);
a50bb7b9
JJ
12173 if (!skb)
12174 return -ENOMEM;
12175
c76949a6
MC
12176 tx_data = skb_put(skb, tx_len);
12177 memcpy(tx_data, tp->dev->dev_addr, 6);
12178 memset(tx_data + 6, 0x0, 8);
12179
4852a861 12180 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
c76949a6 12181
28a45957 12182 if (tso_loopback) {
bb158d69
MC
12183 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
12184
12185 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
12186 TG3_TSO_TCP_OPT_LEN;
12187
12188 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
12189 sizeof(tg3_tso_header));
12190 mss = TG3_TSO_MSS;
12191
12192 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
12193 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
12194
12195 /* Set the total length field in the IP header */
12196 iph->tot_len = htons((u16)(mss + hdr_len));
12197
12198 base_flags = (TXD_FLAG_CPU_PRE_DMA |
12199 TXD_FLAG_CPU_POST_DMA);
12200
63c3a66f
JP
12201 if (tg3_flag(tp, HW_TSO_1) ||
12202 tg3_flag(tp, HW_TSO_2) ||
12203 tg3_flag(tp, HW_TSO_3)) {
bb158d69
MC
12204 struct tcphdr *th;
12205 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
12206 th = (struct tcphdr *)&tx_data[val];
12207 th->check = 0;
12208 } else
12209 base_flags |= TXD_FLAG_TCPUDP_CSUM;
12210
63c3a66f 12211 if (tg3_flag(tp, HW_TSO_3)) {
bb158d69
MC
12212 mss |= (hdr_len & 0xc) << 12;
12213 if (hdr_len & 0x10)
12214 base_flags |= 0x00000010;
12215 base_flags |= (hdr_len & 0x3e0) << 5;
63c3a66f 12216 } else if (tg3_flag(tp, HW_TSO_2))
bb158d69 12217 mss |= hdr_len << 9;
63c3a66f 12218 else if (tg3_flag(tp, HW_TSO_1) ||
bb158d69
MC
12219 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
12220 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
12221 } else {
12222 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
12223 }
12224
12225 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
12226 } else {
12227 num_pkts = 1;
12228 data_off = ETH_HLEN;
c441b456
MC
12229
12230 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
12231 tx_len > VLAN_ETH_FRAME_LEN)
12232 base_flags |= TXD_FLAG_JMB_PKT;
bb158d69
MC
12233 }
12234
12235 for (i = data_off; i < tx_len; i++)
c76949a6
MC
12236 tx_data[i] = (u8) (i & 0xff);
12237
f4188d8a
AD
12238 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
12239 if (pci_dma_mapping_error(tp->pdev, map)) {
a21771dd
MC
12240 dev_kfree_skb(skb);
12241 return -EIO;
12242 }
c76949a6 12243
0d681b27
MC
12244 val = tnapi->tx_prod;
12245 tnapi->tx_buffers[val].skb = skb;
12246 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
12247
c76949a6 12248 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
fd2ce37f 12249 rnapi->coal_now);
c76949a6
MC
12250
12251 udelay(10);
12252
898a56f8 12253 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
c76949a6 12254
84b67b27
MC
12255 budget = tg3_tx_avail(tnapi);
12256 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
d1a3b737
MC
12257 base_flags | TXD_FLAG_END, mss, 0)) {
12258 tnapi->tx_buffers[val].skb = NULL;
12259 dev_kfree_skb(skb);
12260 return -EIO;
12261 }
c76949a6 12262
f3f3f27e 12263 tnapi->tx_prod++;
c76949a6 12264
6541b806
MC
12265 /* Sync BD data before updating mailbox */
12266 wmb();
12267
f3f3f27e
MC
12268 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
12269 tr32_mailbox(tnapi->prodmbox);
c76949a6
MC
12270
12271 udelay(10);
12272
303fc921
MC
 12273	/* Poll up to 350 usec (35 x 10 usec) to allow enough time on some 10/100 Mbps devices. */
12274 for (i = 0; i < 35; i++) {
c76949a6 12275 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
fd2ce37f 12276 coal_now);
c76949a6
MC
12277
12278 udelay(10);
12279
898a56f8
MC
12280 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
12281 rx_idx = rnapi->hw_status->idx[0].rx_producer;
f3f3f27e 12282 if ((tx_idx == tnapi->tx_prod) &&
c76949a6
MC
12283 (rx_idx == (rx_start_idx + num_pkts)))
12284 break;
12285 }
12286
ba1142e4 12287 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
c76949a6
MC
12288 dev_kfree_skb(skb);
12289
f3f3f27e 12290 if (tx_idx != tnapi->tx_prod)
c76949a6
MC
12291 goto out;
12292
12293 if (rx_idx != rx_start_idx + num_pkts)
12294 goto out;
12295
bb158d69
MC
12296 val = data_off;
12297 while (rx_idx != rx_start_idx) {
12298 desc = &rnapi->rx_rcb[rx_start_idx++];
12299 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
12300 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
c76949a6 12301
bb158d69
MC
12302 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
12303 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
12304 goto out;
c76949a6 12305
bb158d69
MC
12306 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
12307 - ETH_FCS_LEN;
c76949a6 12308
28a45957 12309 if (!tso_loopback) {
bb158d69
MC
12310 if (rx_len != tx_len)
12311 goto out;
4852a861 12312
bb158d69
MC
12313 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
12314 if (opaque_key != RXD_OPAQUE_RING_STD)
12315 goto out;
12316 } else {
12317 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
12318 goto out;
12319 }
12320 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
12321 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
54e0a67f 12322 >> RXD_TCPCSUM_SHIFT != 0xffff) {
4852a861 12323 goto out;
bb158d69 12324 }
4852a861 12325
bb158d69 12326 if (opaque_key == RXD_OPAQUE_RING_STD) {
9205fd9c 12327 rx_data = tpr->rx_std_buffers[desc_idx].data;
bb158d69
MC
12328 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
12329 mapping);
12330 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
9205fd9c 12331 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
bb158d69
MC
12332 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
12333 mapping);
12334 } else
12335 goto out;
c76949a6 12336
bb158d69
MC
12337 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
12338 PCI_DMA_FROMDEVICE);
c76949a6 12339
9205fd9c 12340 rx_data += TG3_RX_OFFSET(tp);
bb158d69 12341 for (i = data_off; i < rx_len; i++, val++) {
9205fd9c 12342 if (*(rx_data + i) != (u8) (val & 0xff))
bb158d69
MC
12343 goto out;
12344 }
c76949a6 12345 }
bb158d69 12346
c76949a6 12347 err = 0;
6aa20a22 12348
9205fd9c 12349 /* tg3_free_rings will unmap and free the rx_data */
c76949a6
MC
12350out:
12351 return err;
12352}
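/* In outline, tg3_run_loopback() builds a single frame (optionally a
 * TSO super-frame using tg3_tso_header), queues it on the chosen TX
 * ring, kicks the mailbox, polls the status block until the TX
 * consumer and RX producer both advance, then walks the RX return
 * ring verifying that the payload pattern (byte i == i & 0xff) came
 * back intact.
 */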
12353
00c266b7
MC
12354#define TG3_STD_LOOPBACK_FAILED 1
12355#define TG3_JMB_LOOPBACK_FAILED 2
bb158d69 12356#define TG3_TSO_LOOPBACK_FAILED 4
28a45957
MC
12357#define TG3_LOOPBACK_FAILED \
12358 (TG3_STD_LOOPBACK_FAILED | \
12359 TG3_JMB_LOOPBACK_FAILED | \
12360 TG3_TSO_LOOPBACK_FAILED)
00c266b7 12361
941ec90f 12362static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
9f40dead 12363{
28a45957 12364 int err = -EIO;
2215e24c 12365 u32 eee_cap;
c441b456
MC
12366 u32 jmb_pkt_sz = 9000;
12367
12368 if (tp->dma_limit)
12369 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
9f40dead 12370
ab789046
MC
12371 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
12372 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
12373
28a45957 12374 if (!netif_running(tp->dev)) {
93df8b8f
NNS
12375 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12376 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
941ec90f 12377 if (do_extlpbk)
93df8b8f 12378 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
28a45957
MC
12379 goto done;
12380 }
12381
b9ec6c1b 12382 err = tg3_reset_hw(tp, 1);
ab789046 12383 if (err) {
93df8b8f
NNS
12384 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12385 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
941ec90f 12386 if (do_extlpbk)
93df8b8f 12387 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
ab789046
MC
12388 goto done;
12389 }
9f40dead 12390
63c3a66f 12391 if (tg3_flag(tp, ENABLE_RSS)) {
4a85f098
MC
12392 int i;
12393
12394 /* Reroute all rx packets to the 1st queue */
12395 for (i = MAC_RSS_INDIR_TBL_0;
12396 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
12397 tw32(i, 0x0);
12398 }
12399
6e01b20b
MC
12400 /* HW erratum - MAC loopback fails in some cases on the 5780.
12401 * Normal traffic and PHY loopback are not affected by this
12402 * erratum. Also, the MAC loopback test is deprecated for
12403 * all newer ASIC revisions.
12404 */
12405 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
12406 !tg3_flag(tp, CPMU_PRESENT)) {
12407 tg3_mac_loopback(tp, true);
9936bcf6 12408
28a45957 12409 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
93df8b8f 12410 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
6e01b20b
MC
12411
12412 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
c441b456 12413 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
93df8b8f 12414 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
6e01b20b
MC
12415
12416 tg3_mac_loopback(tp, false);
12417 }
4852a861 12418
f07e9af3 12419 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
63c3a66f 12420 !tg3_flag(tp, USE_PHYLIB)) {
5e5a7f37
MC
12421 int i;
12422
941ec90f 12423 tg3_phy_lpbk_set(tp, 0, false);
5e5a7f37
MC
12424
12425 /* Wait for link */
12426 for (i = 0; i < 100; i++) {
12427 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
12428 break;
12429 mdelay(1);
12430 }
12431
28a45957 12432 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
93df8b8f 12433 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
63c3a66f 12434 if (tg3_flag(tp, TSO_CAPABLE) &&
28a45957 12435 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
93df8b8f 12436 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
63c3a66f 12437 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
c441b456 12438 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
93df8b8f 12439 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
9f40dead 12440
941ec90f
MC
12441 if (do_extlpbk) {
12442 tg3_phy_lpbk_set(tp, 0, true);
12443
12444 /* All link indications report up, but the hardware
12445 * isn't really ready for about 20 msec. Double it
12446 * to be sure.
12447 */
12448 mdelay(40);
12449
12450 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
93df8b8f
NNS
12451 data[TG3_EXT_LOOPB_TEST] |=
12452 TG3_STD_LOOPBACK_FAILED;
941ec90f
MC
12453 if (tg3_flag(tp, TSO_CAPABLE) &&
12454 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
93df8b8f
NNS
12455 data[TG3_EXT_LOOPB_TEST] |=
12456 TG3_TSO_LOOPBACK_FAILED;
941ec90f 12457 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
c441b456 12458 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
93df8b8f
NNS
12459 data[TG3_EXT_LOOPB_TEST] |=
12460 TG3_JMB_LOOPBACK_FAILED;
941ec90f
MC
12461 }
12462
5e5a7f37
MC
12463 /* Re-enable gphy autopowerdown. */
12464 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12465 tg3_phy_toggle_apd(tp, true);
12466 }
6833c043 12467
93df8b8f
NNS
12468 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
12469 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
28a45957 12470
ab789046
MC
12471done:
12472 tp->phy_flags |= eee_cap;
12473
9f40dead
MC
12474 return err;
12475}
12476
4cafd3f5
MC
12477static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12478 u64 *data)
12479{
566f86ad 12480 struct tg3 *tp = netdev_priv(dev);
941ec90f 12481 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
566f86ad 12482
bed9829f
MC
12483 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12484 tg3_power_up(tp)) {
12485 etest->flags |= ETH_TEST_FL_FAILED;
12486 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
12487 return;
12488 }
bc1c7567 12489
566f86ad
MC
12490 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12491
12492 if (tg3_test_nvram(tp) != 0) {
12493 etest->flags |= ETH_TEST_FL_FAILED;
93df8b8f 12494 data[TG3_NVRAM_TEST] = 1;
566f86ad 12495 }
941ec90f 12496 if (!doextlpbk && tg3_test_link(tp)) {
ca43007a 12497 etest->flags |= ETH_TEST_FL_FAILED;
93df8b8f 12498 data[TG3_LINK_TEST] = 1;
ca43007a 12499 }
a71116d1 12500 if (etest->flags & ETH_TEST_FL_OFFLINE) {
b02fd9e3 12501 int err, err2 = 0, irq_sync = 0;
bbe832c0
MC
12502
12503 if (netif_running(dev)) {
b02fd9e3 12504 tg3_phy_stop(tp);
a71116d1 12505 tg3_netif_stop(tp);
bbe832c0
MC
12506 irq_sync = 1;
12507 }
a71116d1 12508
bbe832c0 12509 tg3_full_lock(tp, irq_sync);
a71116d1
MC
12510
12511 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
ec41c7df 12512 err = tg3_nvram_lock(tp);
a71116d1 12513 tg3_halt_cpu(tp, RX_CPU_BASE);
63c3a66f 12514 if (!tg3_flag(tp, 5705_PLUS))
a71116d1 12515 tg3_halt_cpu(tp, TX_CPU_BASE);
ec41c7df
MC
12516 if (!err)
12517 tg3_nvram_unlock(tp);
a71116d1 12518
f07e9af3 12519 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
d9ab5ad1
MC
12520 tg3_phy_reset(tp);
12521
a71116d1
MC
12522 if (tg3_test_registers(tp) != 0) {
12523 etest->flags |= ETH_TEST_FL_FAILED;
93df8b8f 12524 data[TG3_REGISTER_TEST] = 1;
a71116d1 12525 }
28a45957 12526
7942e1db
MC
12527 if (tg3_test_memory(tp) != 0) {
12528 etest->flags |= ETH_TEST_FL_FAILED;
93df8b8f 12529 data[TG3_MEMORY_TEST] = 1;
7942e1db 12530 }
28a45957 12531
941ec90f
MC
12532 if (doextlpbk)
12533 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12534
93df8b8f 12535 if (tg3_test_loopback(tp, data, doextlpbk))
c76949a6 12536 etest->flags |= ETH_TEST_FL_FAILED;
a71116d1 12537
f47c11ee
DM
12538 tg3_full_unlock(tp);
12539
d4bc3927
MC
12540 if (tg3_test_interrupt(tp) != 0) {
12541 etest->flags |= ETH_TEST_FL_FAILED;
93df8b8f 12542 data[TG3_INTERRUPT_TEST] = 1;
d4bc3927 12543 }
f47c11ee
DM
12544
12545 tg3_full_lock(tp, 0);
d4bc3927 12546
a71116d1
MC
12547 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12548 if (netif_running(dev)) {
63c3a66f 12549 tg3_flag_set(tp, INIT_COMPLETE);
b02fd9e3
MC
12550 err2 = tg3_restart_hw(tp, 1);
12551 if (!err2)
b9ec6c1b 12552 tg3_netif_start(tp);
a71116d1 12553 }
f47c11ee
DM
12554
12555 tg3_full_unlock(tp);
b02fd9e3
MC
12556
12557 if (irq_sync && !err2)
12558 tg3_phy_start(tp);
a71116d1 12559 }
80096068 12560 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
c866b7ea 12561 tg3_power_down(tp);
bc1c7567 12562
4cafd3f5
MC
12563}
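/* Editorial note: tg3_self_test() backs the ETHTOOL_TEST ioctl, i.e.
 * "ethtool -t <dev> [offline]" from user space. The data[] slots filled
 * above (TG3_NVRAM_TEST, TG3_LINK_TEST, TG3_REGISTER_TEST,
 * TG3_MEMORY_TEST, the three loopback slots and TG3_INTERRUPT_TEST)
 * are matched one-to-one with the ETH_SS_TEST strings reported by
 * tg3_get_strings().
 */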
12564
1da177e4
LT
12565static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12566{
12567 struct mii_ioctl_data *data = if_mii(ifr);
12568 struct tg3 *tp = netdev_priv(dev);
12569 int err;
12570
63c3a66f 12571 if (tg3_flag(tp, USE_PHYLIB)) {
3f0e3ad7 12572 struct phy_device *phydev;
f07e9af3 12573 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
b02fd9e3 12574 return -EAGAIN;
3f0e3ad7 12575 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
28b04113 12576 return phy_mii_ioctl(phydev, ifr, cmd);
b02fd9e3
MC
12577 }
12578
33f401ae 12579 switch (cmd) {
1da177e4 12580 case SIOCGMIIPHY:
882e9793 12581 data->phy_id = tp->phy_addr;
1da177e4
LT
12582
12583 /* fallthru */
12584 case SIOCGMIIREG: {
12585 u32 mii_regval;
12586
f07e9af3 12587 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
1da177e4
LT
12588 break; /* We have no PHY */
12589
34eea5ac 12590 if (!netif_running(dev))
bc1c7567
MC
12591 return -EAGAIN;
12592
f47c11ee 12593 spin_lock_bh(&tp->lock);
1da177e4 12594 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
f47c11ee 12595 spin_unlock_bh(&tp->lock);
1da177e4
LT
12596
12597 data->val_out = mii_regval;
12598
12599 return err;
12600 }
12601
12602 case SIOCSMIIREG:
f07e9af3 12603 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
1da177e4
LT
12604 break; /* We have no PHY */
12605
34eea5ac 12606 if (!netif_running(dev))
bc1c7567
MC
12607 return -EAGAIN;
12608
f47c11ee 12609 spin_lock_bh(&tp->lock);
1da177e4 12610 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
f47c11ee 12611 spin_unlock_bh(&tp->lock);
1da177e4
LT
12612
12613 return err;
12614
12615 default:
12616 /* do nothing */
12617 break;
12618 }
12619 return -EOPNOTSUPP;
12620}
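/* Illustrative user-space counterpart (a sketch, not driver code) for
 * the SIOCGMIIPHY/SIOCGMIIREG pair serviced above. "eth0" and fd (an
 * open AF_INET socket) are hypothetical:
 *
 *	#include <sys/ioctl.h>
 *	#include <net/if.h>
 *	#include <linux/mii.h>
 *	#include <linux/sockios.h>
 *
 *	struct ifreq ifr = { .ifr_name = "eth0" };
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *
 *	ioctl(fd, SIOCGMIIPHY, &ifr);		// fills mii->phy_id
 *	mii->reg_num = MII_BMSR;		// 5-bit register address
 *	ioctl(fd, SIOCGMIIREG, &ifr);		// result in mii->val_out
 */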
12621
15f9850d
DM
12622static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12623{
12624 struct tg3 *tp = netdev_priv(dev);
12625
12626 memcpy(ec, &tp->coal, sizeof(*ec));
12627 return 0;
12628}
12629
d244c892
MC
12630static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12631{
12632 struct tg3 *tp = netdev_priv(dev);
12633 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12634 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12635
63c3a66f 12636 if (!tg3_flag(tp, 5705_PLUS)) {
d244c892
MC
12637 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12638 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12639 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12640 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12641 }
12642
12643 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12644 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12645 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12646 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12647 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12648 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12649 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12650 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12651 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12652 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12653 return -EINVAL;
12654
12655 /* No rx interrupts will be generated if both are zero */
12656 if ((ec->rx_coalesce_usecs == 0) &&
12657 (ec->rx_max_coalesced_frames == 0))
12658 return -EINVAL;
12659
12660 /* No tx interrupts will be generated if both are zero */
12661 if ((ec->tx_coalesce_usecs == 0) &&
12662 (ec->tx_max_coalesced_frames == 0))
12663 return -EINVAL;
12664
12665 /* Only copy relevant parameters, ignore all others. */
12666 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
12667 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
12668 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12669 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12670 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12671 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12672 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12673 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12674 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12675
12676 if (netif_running(dev)) {
12677 tg3_full_lock(tp, 0);
12678 __tg3_set_coalesce(tp, &tp->coal);
12679 tg3_full_unlock(tp);
12680 }
12681 return 0;
12682}
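/* Illustrative sketch: the bounds above are what "ethtool -C" requests
 * run into. The usual user-space pattern is read-modify-write, since
 * zeroed rx (or tx) usecs and frames together return -EINVAL. With a
 * hypothetical "eth0" and an open socket fd:
 *
 *	struct ethtool_coalesce ec = { .cmd = ETHTOOL_GCOALESCE };
 *	struct ifreq ifr = { .ifr_name = "eth0" };
 *
 *	ifr.ifr_data = (void *)&ec;
 *	ioctl(fd, SIOCETHTOOL, &ifr);	// read current values
 *	ec.cmd = ETHTOOL_SCOALESCE;
 *	ec.rx_coalesce_usecs = 50;	// change only what we care about
 *	ioctl(fd, SIOCETHTOOL, &ifr);
 */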
12683
7282d491 12684static const struct ethtool_ops tg3_ethtool_ops = {
1da177e4
LT
12685 .get_settings = tg3_get_settings,
12686 .set_settings = tg3_set_settings,
12687 .get_drvinfo = tg3_get_drvinfo,
12688 .get_regs_len = tg3_get_regs_len,
12689 .get_regs = tg3_get_regs,
12690 .get_wol = tg3_get_wol,
12691 .set_wol = tg3_set_wol,
12692 .get_msglevel = tg3_get_msglevel,
12693 .set_msglevel = tg3_set_msglevel,
12694 .nway_reset = tg3_nway_reset,
12695 .get_link = ethtool_op_get_link,
12696 .get_eeprom_len = tg3_get_eeprom_len,
12697 .get_eeprom = tg3_get_eeprom,
12698 .set_eeprom = tg3_set_eeprom,
12699 .get_ringparam = tg3_get_ringparam,
12700 .set_ringparam = tg3_set_ringparam,
12701 .get_pauseparam = tg3_get_pauseparam,
12702 .set_pauseparam = tg3_set_pauseparam,
4cafd3f5 12703 .self_test = tg3_self_test,
1da177e4 12704 .get_strings = tg3_get_strings,
81b8709c 12705 .set_phys_id = tg3_set_phys_id,
1da177e4 12706 .get_ethtool_stats = tg3_get_ethtool_stats,
15f9850d 12707 .get_coalesce = tg3_get_coalesce,
d244c892 12708 .set_coalesce = tg3_set_coalesce,
b9f2c044 12709 .get_sset_count = tg3_get_sset_count,
90415477
MC
12710 .get_rxnfc = tg3_get_rxnfc,
12711 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
12712 .get_rxfh_indir = tg3_get_rxfh_indir,
12713 .set_rxfh_indir = tg3_set_rxfh_indir,
0968169c
MC
12714 .get_channels = tg3_get_channels,
12715 .set_channels = tg3_set_channels,
3f847490 12716 .get_ts_info = ethtool_op_get_ts_info,
1da177e4
LT
12717};
12718
b4017c53
DM
12719static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
12720 struct rtnl_link_stats64 *stats)
12721{
12722 struct tg3 *tp = netdev_priv(dev);
12723
0f566b20
MC
12724 spin_lock_bh(&tp->lock);
12725 if (!tp->hw_stats) {
12726 spin_unlock_bh(&tp->lock);
b4017c53 12727 return &tp->net_stats_prev;
0f566b20 12728 }
b4017c53 12729
b4017c53
DM
12730 tg3_get_nstats(tp, stats);
12731 spin_unlock_bh(&tp->lock);
12732
12733 return stats;
12734}
12735
ccd5ba9d
MC
12736static void tg3_set_rx_mode(struct net_device *dev)
12737{
12738 struct tg3 *tp = netdev_priv(dev);
12739
12740 if (!netif_running(dev))
12741 return;
12742
12743 tg3_full_lock(tp, 0);
12744 __tg3_set_rx_mode(dev);
12745 tg3_full_unlock(tp);
12746}
12747
faf1627a
MC
12748static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
12749 int new_mtu)
12750{
12751 dev->mtu = new_mtu;
12752
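	/* Note: the branches below imply that on 5780-class parts TSO
	 * and jumbo frames are mutually exclusive: a jumbo MTU drops
	 * TSO_CAPABLE instead of enabling the jumbo ring.
	 */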
12753 if (new_mtu > ETH_DATA_LEN) {
12754 if (tg3_flag(tp, 5780_CLASS)) {
12755 netdev_update_features(dev);
12756 tg3_flag_clear(tp, TSO_CAPABLE);
12757 } else {
12758 tg3_flag_set(tp, JUMBO_RING_ENABLE);
12759 }
12760 } else {
12761 if (tg3_flag(tp, 5780_CLASS)) {
12762 tg3_flag_set(tp, TSO_CAPABLE);
12763 netdev_update_features(dev);
12764 }
12765 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
12766 }
12767}
12768
12769static int tg3_change_mtu(struct net_device *dev, int new_mtu)
12770{
12771 struct tg3 *tp = netdev_priv(dev);
2fae5e36 12772 int err, reset_phy = 0;
faf1627a
MC
12773
12774 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
12775 return -EINVAL;
12776
12777 if (!netif_running(dev)) {
12778 /* We'll just catch it later when the
12779 * device is brought up.
12780 */
12781 tg3_set_mtu(dev, tp, new_mtu);
12782 return 0;
12783 }
12784
12785 tg3_phy_stop(tp);
12786
12787 tg3_netif_stop(tp);
12788
12789 tg3_full_lock(tp, 1);
12790
12791 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12792
12793 tg3_set_mtu(dev, tp, new_mtu);
12794
2fae5e36
MC
12795 /* Reset PHY, otherwise the read DMA engine will be in a mode that
12796 * breaks all requests to 256 bytes.
12797 */
12798 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
12799 reset_phy = 1;
12800
12801 err = tg3_restart_hw(tp, reset_phy);
faf1627a
MC
12802
12803 if (!err)
12804 tg3_netif_start(tp);
12805
12806 tg3_full_unlock(tp);
12807
12808 if (!err)
12809 tg3_phy_start(tp);
12810
12811 return err;
12812}
12813
12814static const struct net_device_ops tg3_netdev_ops = {
12815 .ndo_open = tg3_open,
12816 .ndo_stop = tg3_close,
12817 .ndo_start_xmit = tg3_start_xmit,
12818 .ndo_get_stats64 = tg3_get_stats64,
12819 .ndo_validate_addr = eth_validate_addr,
12820 .ndo_set_rx_mode = tg3_set_rx_mode,
12821 .ndo_set_mac_address = tg3_set_mac_addr,
12822 .ndo_do_ioctl = tg3_ioctl,
12823 .ndo_tx_timeout = tg3_tx_timeout,
12824 .ndo_change_mtu = tg3_change_mtu,
12825 .ndo_fix_features = tg3_fix_features,
12826 .ndo_set_features = tg3_set_features,
12827#ifdef CONFIG_NET_POLL_CONTROLLER
12828 .ndo_poll_controller = tg3_poll_controller,
12829#endif
12830};
12831
229b1ad1 12832static void tg3_get_eeprom_size(struct tg3 *tp)
1da177e4 12833{
1b27777a 12834 u32 cursize, val, magic;
1da177e4
LT
12835
12836 tp->nvram_size = EEPROM_CHIP_SIZE;
12837
e4f34110 12838 if (tg3_nvram_read(tp, 0, &magic) != 0)
1da177e4
LT
12839 return;
12840
b16250e3
MC
12841 if ((magic != TG3_EEPROM_MAGIC) &&
12842 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12843 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
1da177e4
LT
12844 return;
12845
12846 /*
12847 * Size the chip by reading offsets at increasing powers of two.
12848 * When we encounter our validation signature, we know the addressing
12849 * has wrapped around, and thus have our chip size.
12850 */
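	/* Worked example (illustrative): for a 1 KB part with the magic
	 * at offset 0, probes at 0x10, 0x20, ... miss until the read at
	 * 0x400 wraps back to offset 0 and returns the magic, making
	 * cursize (0x400) the chip size.
	 */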
1b27777a 12851 cursize = 0x10;
1da177e4
LT
12852
12853 while (cursize < tp->nvram_size) {
e4f34110 12854 if (tg3_nvram_read(tp, cursize, &val) != 0)
1da177e4
LT
12855 return;
12856
1820180b 12857 if (val == magic)
1da177e4
LT
12858 break;
12859
12860 cursize <<= 1;
12861 }
12862
12863 tp->nvram_size = cursize;
12864}
6aa20a22 12865
229b1ad1 12866static void tg3_get_nvram_size(struct tg3 *tp)
1da177e4
LT
12867{
12868 u32 val;
12869
63c3a66f 12870 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
1b27777a
MC
12871 return;
12872
12873 /* Selfboot format */
1820180b 12874 if (val != TG3_EEPROM_MAGIC) {
1b27777a
MC
12875 tg3_get_eeprom_size(tp);
12876 return;
12877 }
12878
6d348f2c 12879 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
1da177e4 12880 if (val != 0) {
6d348f2c
MC
12881 /* This is confusing. We want to operate on the
12882 * 16-bit value at offset 0xf2. The tg3_nvram_read()
12883 * call will read from NVRAM and byteswap the data
12884 * according to the byteswapping settings for all
12885 * other register accesses. This ensures the data we
12886 * want will always reside in the lower 16-bits.
12887 * However, the data in NVRAM is in LE format, which
12888 * means the data from the NVRAM read will always be
12889 * opposite the endianness of the CPU. The 16-bit
12890 * byteswap then brings the data to CPU endianness.
12891 */
12892 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
1da177e4
LT
12893 return;
12894 }
12895 }
fd1122a2 12896 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
1da177e4
LT
12897}
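/* Worked example of the swab16() conversion above (illustrative
 * values): if the little-endian field at 0xf2 holds 0x0200 (512, in KB
 * units), the swapped read leaves 0x0002 in the low 16 bits of val;
 * swab16() restores 0x0200 and nvram_size becomes 512 * 1024 bytes.
 */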
12898
229b1ad1 12899static void tg3_get_nvram_info(struct tg3 *tp)
1da177e4
LT
12900{
12901 u32 nvcfg1;
12902
12903 nvcfg1 = tr32(NVRAM_CFG1);
12904 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
63c3a66f 12905 tg3_flag_set(tp, FLASH);
8590a603 12906 } else {
1da177e4
LT
12907 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12908 tw32(NVRAM_CFG1, nvcfg1);
12909 }
12910
6ff6f81d 12911 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
63c3a66f 12912 tg3_flag(tp, 5780_CLASS)) {
1da177e4 12913 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
8590a603
MC
12914 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12915 tp->nvram_jedecnum = JEDEC_ATMEL;
12916 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
63c3a66f 12917 tg3_flag_set(tp, NVRAM_BUFFERED);
8590a603
MC
12918 break;
12919 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12920 tp->nvram_jedecnum = JEDEC_ATMEL;
12921 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12922 break;
12923 case FLASH_VENDOR_ATMEL_EEPROM:
12924 tp->nvram_jedecnum = JEDEC_ATMEL;
12925 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
63c3a66f 12926 tg3_flag_set(tp, NVRAM_BUFFERED);
8590a603
MC
12927 break;
12928 case FLASH_VENDOR_ST:
12929 tp->nvram_jedecnum = JEDEC_ST;
12930 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
63c3a66f 12931 tg3_flag_set(tp, NVRAM_BUFFERED);
8590a603
MC
12932 break;
12933 case FLASH_VENDOR_SAIFUN:
12934 tp->nvram_jedecnum = JEDEC_SAIFUN;
12935 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12936 break;
12937 case FLASH_VENDOR_SST_SMALL:
12938 case FLASH_VENDOR_SST_LARGE:
12939 tp->nvram_jedecnum = JEDEC_SST;
12940 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12941 break;
1da177e4 12942 }
8590a603 12943 } else {
1da177e4
LT
12944 tp->nvram_jedecnum = JEDEC_ATMEL;
12945 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
63c3a66f 12946 tg3_flag_set(tp, NVRAM_BUFFERED);
1da177e4
LT
12947 }
12948}
12949
229b1ad1 12950static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
a1b950d5
MC
12951{
12952 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12953 case FLASH_5752PAGE_SIZE_256:
12954 tp->nvram_pagesize = 256;
12955 break;
12956 case FLASH_5752PAGE_SIZE_512:
12957 tp->nvram_pagesize = 512;
12958 break;
12959 case FLASH_5752PAGE_SIZE_1K:
12960 tp->nvram_pagesize = 1024;
12961 break;
12962 case FLASH_5752PAGE_SIZE_2K:
12963 tp->nvram_pagesize = 2048;
12964 break;
12965 case FLASH_5752PAGE_SIZE_4K:
12966 tp->nvram_pagesize = 4096;
12967 break;
12968 case FLASH_5752PAGE_SIZE_264:
12969 tp->nvram_pagesize = 264;
12970 break;
12971 case FLASH_5752PAGE_SIZE_528:
12972 tp->nvram_pagesize = 528;
12973 break;
12974 }
12975}
12976
229b1ad1 12977static void tg3_get_5752_nvram_info(struct tg3 *tp)
361b4ac2
MC
12978{
12979 u32 nvcfg1;
12980
12981 nvcfg1 = tr32(NVRAM_CFG1);
12982
e6af301b
MC
12983 /* NVRAM protection for TPM */
12984 if (nvcfg1 & (1 << 27))
63c3a66f 12985 tg3_flag_set(tp, PROTECTED_NVRAM);
e6af301b 12986
361b4ac2 12987 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8590a603
MC
12988 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12989 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12990 tp->nvram_jedecnum = JEDEC_ATMEL;
63c3a66f 12991 tg3_flag_set(tp, NVRAM_BUFFERED);
8590a603
MC
12992 break;
12993 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12994 tp->nvram_jedecnum = JEDEC_ATMEL;
63c3a66f
JP
12995 tg3_flag_set(tp, NVRAM_BUFFERED);
12996 tg3_flag_set(tp, FLASH);
8590a603
MC
12997 break;
12998 case FLASH_5752VENDOR_ST_M45PE10:
12999 case FLASH_5752VENDOR_ST_M45PE20:
13000 case FLASH_5752VENDOR_ST_M45PE40:
13001 tp->nvram_jedecnum = JEDEC_ST;
63c3a66f
JP
13002 tg3_flag_set(tp, NVRAM_BUFFERED);
13003 tg3_flag_set(tp, FLASH);
8590a603 13004 break;
361b4ac2
MC
13005 }
13006
63c3a66f 13007 if (tg3_flag(tp, FLASH)) {
a1b950d5 13008 tg3_nvram_get_pagesize(tp, nvcfg1);
8590a603 13009 } else {
361b4ac2
MC
13010 /* For eeprom, set pagesize to maximum eeprom size */
13011 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13012
13013 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13014 tw32(NVRAM_CFG1, nvcfg1);
13015 }
13016}
13017
229b1ad1 13018static void tg3_get_5755_nvram_info(struct tg3 *tp)
d3c7b886 13019{
989a9d23 13020 u32 nvcfg1, protect = 0;
d3c7b886
MC
13021
13022 nvcfg1 = tr32(NVRAM_CFG1);
13023
13024 /* NVRAM protection for TPM */
989a9d23 13025 if (nvcfg1 & (1 << 27)) {
63c3a66f 13026 tg3_flag_set(tp, PROTECTED_NVRAM);
989a9d23
MC
13027 protect = 1;
13028 }
d3c7b886 13029
989a9d23
MC
13030 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13031 switch (nvcfg1) {
8590a603
MC
13032 case FLASH_5755VENDOR_ATMEL_FLASH_1:
13033 case FLASH_5755VENDOR_ATMEL_FLASH_2:
13034 case FLASH_5755VENDOR_ATMEL_FLASH_3:
13035 case FLASH_5755VENDOR_ATMEL_FLASH_5:
13036 tp->nvram_jedecnum = JEDEC_ATMEL;
63c3a66f
JP
13037 tg3_flag_set(tp, NVRAM_BUFFERED);
13038 tg3_flag_set(tp, FLASH);
8590a603
MC
13039 tp->nvram_pagesize = 264;
13040 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
13041 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
13042 tp->nvram_size = (protect ? 0x3e200 :
13043 TG3_NVRAM_SIZE_512KB);
13044 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
13045 tp->nvram_size = (protect ? 0x1f200 :
13046 TG3_NVRAM_SIZE_256KB);
13047 else
13048 tp->nvram_size = (protect ? 0x1f200 :
13049 TG3_NVRAM_SIZE_128KB);
13050 break;
13051 case FLASH_5752VENDOR_ST_M45PE10:
13052 case FLASH_5752VENDOR_ST_M45PE20:
13053 case FLASH_5752VENDOR_ST_M45PE40:
13054 tp->nvram_jedecnum = JEDEC_ST;
63c3a66f
JP
13055 tg3_flag_set(tp, NVRAM_BUFFERED);
13056 tg3_flag_set(tp, FLASH);
8590a603
MC
13057 tp->nvram_pagesize = 256;
13058 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
13059 tp->nvram_size = (protect ?
13060 TG3_NVRAM_SIZE_64KB :
13061 TG3_NVRAM_SIZE_128KB);
13062 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
13063 tp->nvram_size = (protect ?
13064 TG3_NVRAM_SIZE_64KB :
13065 TG3_NVRAM_SIZE_256KB);
13066 else
13067 tp->nvram_size = (protect ?
13068 TG3_NVRAM_SIZE_128KB :
13069 TG3_NVRAM_SIZE_512KB);
13070 break;
d3c7b886
MC
13071 }
13072}
13073
229b1ad1 13074static void tg3_get_5787_nvram_info(struct tg3 *tp)
1b27777a
MC
13075{
13076 u32 nvcfg1;
13077
13078 nvcfg1 = tr32(NVRAM_CFG1);
13079
13080 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
8590a603
MC
13081 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
13082 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13083 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
13084 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13085 tp->nvram_jedecnum = JEDEC_ATMEL;
63c3a66f 13086 tg3_flag_set(tp, NVRAM_BUFFERED);
8590a603 13087 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
1b27777a 13088
8590a603
MC
13089 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13090 tw32(NVRAM_CFG1, nvcfg1);
13091 break;
13092 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13093 case FLASH_5755VENDOR_ATMEL_FLASH_1:
13094 case FLASH_5755VENDOR_ATMEL_FLASH_2:
13095 case FLASH_5755VENDOR_ATMEL_FLASH_3:
13096 tp->nvram_jedecnum = JEDEC_ATMEL;
63c3a66f
JP
13097 tg3_flag_set(tp, NVRAM_BUFFERED);
13098 tg3_flag_set(tp, FLASH);
8590a603
MC
13099 tp->nvram_pagesize = 264;
13100 break;
13101 case FLASH_5752VENDOR_ST_M45PE10:
13102 case FLASH_5752VENDOR_ST_M45PE20:
13103 case FLASH_5752VENDOR_ST_M45PE40:
13104 tp->nvram_jedecnum = JEDEC_ST;
63c3a66f
JP
13105 tg3_flag_set(tp, NVRAM_BUFFERED);
13106 tg3_flag_set(tp, FLASH);
8590a603
MC
13107 tp->nvram_pagesize = 256;
13108 break;
1b27777a
MC
13109 }
13110}
13111
229b1ad1 13112static void tg3_get_5761_nvram_info(struct tg3 *tp)
6b91fa02
MC
13113{
13114 u32 nvcfg1, protect = 0;
13115
13116 nvcfg1 = tr32(NVRAM_CFG1);
13117
13118 /* NVRAM protection for TPM */
13119 if (nvcfg1 & (1 << 27)) {
63c3a66f 13120 tg3_flag_set(tp, PROTECTED_NVRAM);
6b91fa02
MC
13121 protect = 1;
13122 }
13123
13124 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13125 switch (nvcfg1) {
8590a603
MC
13126 case FLASH_5761VENDOR_ATMEL_ADB021D:
13127 case FLASH_5761VENDOR_ATMEL_ADB041D:
13128 case FLASH_5761VENDOR_ATMEL_ADB081D:
13129 case FLASH_5761VENDOR_ATMEL_ADB161D:
13130 case FLASH_5761VENDOR_ATMEL_MDB021D:
13131 case FLASH_5761VENDOR_ATMEL_MDB041D:
13132 case FLASH_5761VENDOR_ATMEL_MDB081D:
13133 case FLASH_5761VENDOR_ATMEL_MDB161D:
13134 tp->nvram_jedecnum = JEDEC_ATMEL;
63c3a66f
JP
13135 tg3_flag_set(tp, NVRAM_BUFFERED);
13136 tg3_flag_set(tp, FLASH);
13137 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
8590a603
MC
13138 tp->nvram_pagesize = 256;
13139 break;
13140 case FLASH_5761VENDOR_ST_A_M45PE20:
13141 case FLASH_5761VENDOR_ST_A_M45PE40:
13142 case FLASH_5761VENDOR_ST_A_M45PE80:
13143 case FLASH_5761VENDOR_ST_A_M45PE16:
13144 case FLASH_5761VENDOR_ST_M_M45PE20:
13145 case FLASH_5761VENDOR_ST_M_M45PE40:
13146 case FLASH_5761VENDOR_ST_M_M45PE80:
13147 case FLASH_5761VENDOR_ST_M_M45PE16:
13148 tp->nvram_jedecnum = JEDEC_ST;
63c3a66f
JP
13149 tg3_flag_set(tp, NVRAM_BUFFERED);
13150 tg3_flag_set(tp, FLASH);
8590a603
MC
13151 tp->nvram_pagesize = 256;
13152 break;
6b91fa02
MC
13153 }
13154
13155 if (protect) {
13156 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
13157 } else {
13158 switch (nvcfg1) {
8590a603
MC
13159 case FLASH_5761VENDOR_ATMEL_ADB161D:
13160 case FLASH_5761VENDOR_ATMEL_MDB161D:
13161 case FLASH_5761VENDOR_ST_A_M45PE16:
13162 case FLASH_5761VENDOR_ST_M_M45PE16:
13163 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
13164 break;
13165 case FLASH_5761VENDOR_ATMEL_ADB081D:
13166 case FLASH_5761VENDOR_ATMEL_MDB081D:
13167 case FLASH_5761VENDOR_ST_A_M45PE80:
13168 case FLASH_5761VENDOR_ST_M_M45PE80:
13169 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13170 break;
13171 case FLASH_5761VENDOR_ATMEL_ADB041D:
13172 case FLASH_5761VENDOR_ATMEL_MDB041D:
13173 case FLASH_5761VENDOR_ST_A_M45PE40:
13174 case FLASH_5761VENDOR_ST_M_M45PE40:
13175 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13176 break;
13177 case FLASH_5761VENDOR_ATMEL_ADB021D:
13178 case FLASH_5761VENDOR_ATMEL_MDB021D:
13179 case FLASH_5761VENDOR_ST_A_M45PE20:
13180 case FLASH_5761VENDOR_ST_M_M45PE20:
13181 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13182 break;
6b91fa02
MC
13183 }
13184 }
13185}
13186
229b1ad1 13187static void tg3_get_5906_nvram_info(struct tg3 *tp)
b5d3772c
MC
13188{
13189 tp->nvram_jedecnum = JEDEC_ATMEL;
63c3a66f 13190 tg3_flag_set(tp, NVRAM_BUFFERED);
b5d3772c
MC
13191 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13192}
13193
229b1ad1 13194static void tg3_get_57780_nvram_info(struct tg3 *tp)
321d32a0
MC
13195{
13196 u32 nvcfg1;
13197
13198 nvcfg1 = tr32(NVRAM_CFG1);
13199
13200 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13201 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13202 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13203 tp->nvram_jedecnum = JEDEC_ATMEL;
63c3a66f 13204 tg3_flag_set(tp, NVRAM_BUFFERED);
321d32a0
MC
13205 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13206
13207 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13208 tw32(NVRAM_CFG1, nvcfg1);
13209 return;
13210 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13211 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13212 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13213 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13214 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13215 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13216 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13217 tp->nvram_jedecnum = JEDEC_ATMEL;
63c3a66f
JP
13218 tg3_flag_set(tp, NVRAM_BUFFERED);
13219 tg3_flag_set(tp, FLASH);
321d32a0
MC
13220
13221 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13222 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13223 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13224 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13225 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13226 break;
13227 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13228 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13229 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13230 break;
13231 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13232 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13233 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13234 break;
13235 }
13236 break;
13237 case FLASH_5752VENDOR_ST_M45PE10:
13238 case FLASH_5752VENDOR_ST_M45PE20:
13239 case FLASH_5752VENDOR_ST_M45PE40:
13240 tp->nvram_jedecnum = JEDEC_ST;
63c3a66f
JP
13241 tg3_flag_set(tp, NVRAM_BUFFERED);
13242 tg3_flag_set(tp, FLASH);
321d32a0
MC
13243
13244 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13245 case FLASH_5752VENDOR_ST_M45PE10:
13246 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13247 break;
13248 case FLASH_5752VENDOR_ST_M45PE20:
13249 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13250 break;
13251 case FLASH_5752VENDOR_ST_M45PE40:
13252 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13253 break;
13254 }
13255 break;
13256 default:
63c3a66f 13257 tg3_flag_set(tp, NO_NVRAM);
321d32a0
MC
13258 return;
13259 }
13260
a1b950d5
MC
13261 tg3_nvram_get_pagesize(tp, nvcfg1);
13262 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
63c3a66f 13263 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
a1b950d5
MC
13264}
13265
13266
229b1ad1 13267static void tg3_get_5717_nvram_info(struct tg3 *tp)
a1b950d5
MC
13268{
13269 u32 nvcfg1;
13270
13271 nvcfg1 = tr32(NVRAM_CFG1);
13272
13273 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13274 case FLASH_5717VENDOR_ATMEL_EEPROM:
13275 case FLASH_5717VENDOR_MICRO_EEPROM:
13276 tp->nvram_jedecnum = JEDEC_ATMEL;
63c3a66f 13277 tg3_flag_set(tp, NVRAM_BUFFERED);
a1b950d5
MC
13278 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13279
13280 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13281 tw32(NVRAM_CFG1, nvcfg1);
13282 return;
13283 case FLASH_5717VENDOR_ATMEL_MDB011D:
13284 case FLASH_5717VENDOR_ATMEL_ADB011B:
13285 case FLASH_5717VENDOR_ATMEL_ADB011D:
13286 case FLASH_5717VENDOR_ATMEL_MDB021D:
13287 case FLASH_5717VENDOR_ATMEL_ADB021B:
13288 case FLASH_5717VENDOR_ATMEL_ADB021D:
13289 case FLASH_5717VENDOR_ATMEL_45USPT:
13290 tp->nvram_jedecnum = JEDEC_ATMEL;
63c3a66f
JP
13291 tg3_flag_set(tp, NVRAM_BUFFERED);
13292 tg3_flag_set(tp, FLASH);
a1b950d5
MC
13293
13294 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13295 case FLASH_5717VENDOR_ATMEL_MDB021D:
66ee33bf
MC
13296 /* Detect size with tg3_nvram_get_size() */
13297 break;
a1b950d5
MC
13298 case FLASH_5717VENDOR_ATMEL_ADB021B:
13299 case FLASH_5717VENDOR_ATMEL_ADB021D:
13300 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13301 break;
13302 default:
13303 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13304 break;
13305 }
321d32a0 13306 break;
a1b950d5
MC
13307 case FLASH_5717VENDOR_ST_M_M25PE10:
13308 case FLASH_5717VENDOR_ST_A_M25PE10:
13309 case FLASH_5717VENDOR_ST_M_M45PE10:
13310 case FLASH_5717VENDOR_ST_A_M45PE10:
13311 case FLASH_5717VENDOR_ST_M_M25PE20:
13312 case FLASH_5717VENDOR_ST_A_M25PE20:
13313 case FLASH_5717VENDOR_ST_M_M45PE20:
13314 case FLASH_5717VENDOR_ST_A_M45PE20:
13315 case FLASH_5717VENDOR_ST_25USPT:
13316 case FLASH_5717VENDOR_ST_45USPT:
13317 tp->nvram_jedecnum = JEDEC_ST;
63c3a66f
JP
13318 tg3_flag_set(tp, NVRAM_BUFFERED);
13319 tg3_flag_set(tp, FLASH);
a1b950d5
MC
13320
13321 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13322 case FLASH_5717VENDOR_ST_M_M25PE20:
a1b950d5 13323 case FLASH_5717VENDOR_ST_M_M45PE20:
66ee33bf
MC
13324 /* Detect size with tg3_nvram_get_size() */
13325 break;
13326 case FLASH_5717VENDOR_ST_A_M25PE20:
a1b950d5
MC
13327 case FLASH_5717VENDOR_ST_A_M45PE20:
13328 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13329 break;
13330 default:
13331 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13332 break;
13333 }
321d32a0 13334 break;
a1b950d5 13335 default:
63c3a66f 13336 tg3_flag_set(tp, NO_NVRAM);
a1b950d5 13337 return;
321d32a0 13338 }
a1b950d5
MC
13339
13340 tg3_nvram_get_pagesize(tp, nvcfg1);
13341 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
63c3a66f 13342 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
321d32a0
MC
13343}
13344
229b1ad1 13345static void tg3_get_5720_nvram_info(struct tg3 *tp)
9b91b5f1
MC
13346{
13347 u32 nvcfg1, nvmpinstrp;
13348
13349 nvcfg1 = tr32(NVRAM_CFG1);
13350 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
13351
13352 switch (nvmpinstrp) {
13353 case FLASH_5720_EEPROM_HD:
13354 case FLASH_5720_EEPROM_LD:
13355 tp->nvram_jedecnum = JEDEC_ATMEL;
63c3a66f 13356 tg3_flag_set(tp, NVRAM_BUFFERED);
9b91b5f1
MC
13357
13358 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13359 tw32(NVRAM_CFG1, nvcfg1);
13360 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
13361 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13362 else
13363 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
13364 return;
13365 case FLASH_5720VENDOR_M_ATMEL_DB011D:
13366 case FLASH_5720VENDOR_A_ATMEL_DB011B:
13367 case FLASH_5720VENDOR_A_ATMEL_DB011D:
13368 case FLASH_5720VENDOR_M_ATMEL_DB021D:
13369 case FLASH_5720VENDOR_A_ATMEL_DB021B:
13370 case FLASH_5720VENDOR_A_ATMEL_DB021D:
13371 case FLASH_5720VENDOR_M_ATMEL_DB041D:
13372 case FLASH_5720VENDOR_A_ATMEL_DB041B:
13373 case FLASH_5720VENDOR_A_ATMEL_DB041D:
13374 case FLASH_5720VENDOR_M_ATMEL_DB081D:
13375 case FLASH_5720VENDOR_A_ATMEL_DB081D:
13376 case FLASH_5720VENDOR_ATMEL_45USPT:
13377 tp->nvram_jedecnum = JEDEC_ATMEL;
63c3a66f
JP
13378 tg3_flag_set(tp, NVRAM_BUFFERED);
13379 tg3_flag_set(tp, FLASH);
9b91b5f1
MC
13380
13381 switch (nvmpinstrp) {
13382 case FLASH_5720VENDOR_M_ATMEL_DB021D:
13383 case FLASH_5720VENDOR_A_ATMEL_DB021B:
13384 case FLASH_5720VENDOR_A_ATMEL_DB021D:
13385 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13386 break;
13387 case FLASH_5720VENDOR_M_ATMEL_DB041D:
13388 case FLASH_5720VENDOR_A_ATMEL_DB041B:
13389 case FLASH_5720VENDOR_A_ATMEL_DB041D:
13390 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13391 break;
13392 case FLASH_5720VENDOR_M_ATMEL_DB081D:
13393 case FLASH_5720VENDOR_A_ATMEL_DB081D:
13394 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13395 break;
13396 default:
13397 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13398 break;
13399 }
13400 break;
13401 case FLASH_5720VENDOR_M_ST_M25PE10:
13402 case FLASH_5720VENDOR_M_ST_M45PE10:
13403 case FLASH_5720VENDOR_A_ST_M25PE10:
13404 case FLASH_5720VENDOR_A_ST_M45PE10:
13405 case FLASH_5720VENDOR_M_ST_M25PE20:
13406 case FLASH_5720VENDOR_M_ST_M45PE20:
13407 case FLASH_5720VENDOR_A_ST_M25PE20:
13408 case FLASH_5720VENDOR_A_ST_M45PE20:
13409 case FLASH_5720VENDOR_M_ST_M25PE40:
13410 case FLASH_5720VENDOR_M_ST_M45PE40:
13411 case FLASH_5720VENDOR_A_ST_M25PE40:
13412 case FLASH_5720VENDOR_A_ST_M45PE40:
13413 case FLASH_5720VENDOR_M_ST_M25PE80:
13414 case FLASH_5720VENDOR_M_ST_M45PE80:
13415 case FLASH_5720VENDOR_A_ST_M25PE80:
13416 case FLASH_5720VENDOR_A_ST_M45PE80:
13417 case FLASH_5720VENDOR_ST_25USPT:
13418 case FLASH_5720VENDOR_ST_45USPT:
13419 tp->nvram_jedecnum = JEDEC_ST;
63c3a66f
JP
13420 tg3_flag_set(tp, NVRAM_BUFFERED);
13421 tg3_flag_set(tp, FLASH);
9b91b5f1
MC
13422
13423 switch (nvmpinstrp) {
13424 case FLASH_5720VENDOR_M_ST_M25PE20:
13425 case FLASH_5720VENDOR_M_ST_M45PE20:
13426 case FLASH_5720VENDOR_A_ST_M25PE20:
13427 case FLASH_5720VENDOR_A_ST_M45PE20:
13428 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13429 break;
13430 case FLASH_5720VENDOR_M_ST_M25PE40:
13431 case FLASH_5720VENDOR_M_ST_M45PE40:
13432 case FLASH_5720VENDOR_A_ST_M25PE40:
13433 case FLASH_5720VENDOR_A_ST_M45PE40:
13434 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13435 break;
13436 case FLASH_5720VENDOR_M_ST_M25PE80:
13437 case FLASH_5720VENDOR_M_ST_M45PE80:
13438 case FLASH_5720VENDOR_A_ST_M25PE80:
13439 case FLASH_5720VENDOR_A_ST_M45PE80:
13440 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13441 break;
13442 default:
13443 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13444 break;
13445 }
13446 break;
13447 default:
63c3a66f 13448 tg3_flag_set(tp, NO_NVRAM);
9b91b5f1
MC
13449 return;
13450 }
13451
13452 tg3_nvram_get_pagesize(tp, nvcfg1);
13453 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
63c3a66f 13454 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
9b91b5f1
MC
13455}
13456
1da177e4 13457/* Chips other than 5700/5701 use the NVRAM for fetching info. */
229b1ad1 13458static void tg3_nvram_init(struct tg3 *tp)
1da177e4 13459{
1da177e4
LT
13460 tw32_f(GRC_EEPROM_ADDR,
13461 (EEPROM_ADDR_FSM_RESET |
13462 (EEPROM_DEFAULT_CLOCK_PERIOD <<
13463 EEPROM_ADDR_CLKPERD_SHIFT)));
13464
9d57f01c 13465 msleep(1);
1da177e4
LT
13466
13467 /* Enable seeprom accesses. */
13468 tw32_f(GRC_LOCAL_CTRL,
13469 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
13470 udelay(100);
13471
13472 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13473 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
63c3a66f 13474 tg3_flag_set(tp, NVRAM);
1da177e4 13475
ec41c7df 13476 if (tg3_nvram_lock(tp)) {
5129c3a3
MC
13477 netdev_warn(tp->dev,
13478 "Cannot get nvram lock, %s failed\n",
05dbe005 13479 __func__);
ec41c7df
MC
13480 return;
13481 }
e6af301b 13482 tg3_enable_nvram_access(tp);
1da177e4 13483
989a9d23
MC
13484 tp->nvram_size = 0;
13485
361b4ac2
MC
13486 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13487 tg3_get_5752_nvram_info(tp);
d3c7b886
MC
13488 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13489 tg3_get_5755_nvram_info(tp);
d30cdd28 13490 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
57e6983c
MC
13491 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13492 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1b27777a 13493 tg3_get_5787_nvram_info(tp);
6b91fa02
MC
13494 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
13495 tg3_get_5761_nvram_info(tp);
b5d3772c
MC
13496 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13497 tg3_get_5906_nvram_info(tp);
b703df6f 13498 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
55086ad9 13499 tg3_flag(tp, 57765_CLASS))
321d32a0 13500 tg3_get_57780_nvram_info(tp);
9b91b5f1
MC
13501 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13502 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
a1b950d5 13503 tg3_get_5717_nvram_info(tp);
9b91b5f1
MC
13504 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13505 tg3_get_5720_nvram_info(tp);
361b4ac2
MC
13506 else
13507 tg3_get_nvram_info(tp);
13508
989a9d23
MC
13509 if (tp->nvram_size == 0)
13510 tg3_get_nvram_size(tp);
1da177e4 13511
e6af301b 13512 tg3_disable_nvram_access(tp);
381291b7 13513 tg3_nvram_unlock(tp);
1da177e4
LT
13514
13515 } else {
63c3a66f
JP
13516 tg3_flag_clear(tp, NVRAM);
13517 tg3_flag_clear(tp, NVRAM_BUFFERED);
1da177e4
LT
13518
13519 tg3_get_eeprom_size(tp);
13520 }
13521}
13522
1da177e4
LT
13523struct subsys_tbl_ent {
13524 u16 subsys_vendor, subsys_devid;
13525 u32 phy_id;
13526};
13527
229b1ad1 13528static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
1da177e4 13529 /* Broadcom boards. */
24daf2b0 13530 { TG3PCI_SUBVENDOR_ID_BROADCOM,
79eb6904 13531 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
24daf2b0 13532 { TG3PCI_SUBVENDOR_ID_BROADCOM,
79eb6904 13533 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
24daf2b0 13534 { TG3PCI_SUBVENDOR_ID_BROADCOM,
79eb6904 13535 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
24daf2b0
MC
13536 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13537 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13538 { TG3PCI_SUBVENDOR_ID_BROADCOM,
79eb6904 13539 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
24daf2b0 13540 { TG3PCI_SUBVENDOR_ID_BROADCOM,
79eb6904 13541 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
24daf2b0
MC
13542 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13543 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13544 { TG3PCI_SUBVENDOR_ID_BROADCOM,
79eb6904 13545 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
24daf2b0 13546 { TG3PCI_SUBVENDOR_ID_BROADCOM,
79eb6904 13547 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
24daf2b0 13548 { TG3PCI_SUBVENDOR_ID_BROADCOM,
79eb6904 13549 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
24daf2b0 13550 { TG3PCI_SUBVENDOR_ID_BROADCOM,
79eb6904 13551 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
1da177e4
LT
13552
13553 /* 3com boards. */
24daf2b0 13554 { TG3PCI_SUBVENDOR_ID_3COM,
79eb6904 13555 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
24daf2b0 13556 { TG3PCI_SUBVENDOR_ID_3COM,
79eb6904 13557 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
24daf2b0
MC
13558 { TG3PCI_SUBVENDOR_ID_3COM,
13559 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13560 { TG3PCI_SUBVENDOR_ID_3COM,
79eb6904 13561 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
24daf2b0 13562 { TG3PCI_SUBVENDOR_ID_3COM,
79eb6904 13563 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
1da177e4
LT
13564
13565 /* DELL boards. */
24daf2b0 13566 { TG3PCI_SUBVENDOR_ID_DELL,
79eb6904 13567 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
24daf2b0 13568 { TG3PCI_SUBVENDOR_ID_DELL,
79eb6904 13569 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
24daf2b0 13570 { TG3PCI_SUBVENDOR_ID_DELL,
79eb6904 13571 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
24daf2b0 13572 { TG3PCI_SUBVENDOR_ID_DELL,
79eb6904 13573 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
1da177e4
LT
13574
13575 /* Compaq boards. */
24daf2b0 13576 { TG3PCI_SUBVENDOR_ID_COMPAQ,
79eb6904 13577 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
24daf2b0 13578 { TG3PCI_SUBVENDOR_ID_COMPAQ,
79eb6904 13579 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
24daf2b0
MC
13580 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13581 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13582 { TG3PCI_SUBVENDOR_ID_COMPAQ,
79eb6904 13583 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
24daf2b0 13584 { TG3PCI_SUBVENDOR_ID_COMPAQ,
79eb6904 13585 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
1da177e4
LT
13586
13587 /* IBM boards. */
24daf2b0
MC
13588 { TG3PCI_SUBVENDOR_ID_IBM,
13589 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
1da177e4
LT
13590};
13591
229b1ad1 13592static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
1da177e4
LT
13593{
13594 int i;
13595
13596 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13597 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13598 tp->pdev->subsystem_vendor) &&
13599 (subsys_id_to_phy_id[i].subsys_devid ==
13600 tp->pdev->subsystem_device))
13601 return &subsys_id_to_phy_id[i];
13602 }
13603 return NULL;
13604}
13605
229b1ad1 13606static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
1da177e4 13607{
1da177e4 13608 u32 val;
f49639e6 13609
79eb6904 13610 tp->phy_id = TG3_PHY_ID_INVALID;
7d0c41ef
MC
13611 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13612
a85feb8c 13613 /* Assume an onboard, WOL-capable device by default. */
63c3a66f
JP
13614 tg3_flag_set(tp, EEPROM_WRITE_PROT);
13615 tg3_flag_set(tp, WOL_CAP);
72b845e0 13616
b5d3772c 13617 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9d26e213 13618 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
63c3a66f
JP
13619 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13620 tg3_flag_set(tp, IS_NIC);
9d26e213 13621 }
0527ba35
MC
13622 val = tr32(VCPU_CFGSHDW);
13623 if (val & VCPU_CFGSHDW_ASPM_DBNC)
63c3a66f 13624 tg3_flag_set(tp, ASPM_WORKAROUND);
0527ba35 13625 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
6fdbab9d 13626 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
63c3a66f 13627 tg3_flag_set(tp, WOL_ENABLE);
6fdbab9d
RW
13628 device_set_wakeup_enable(&tp->pdev->dev, true);
13629 }
05ac4cb7 13630 goto done;
b5d3772c
MC
13631 }
13632
1da177e4
LT
13633 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13634 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13635 u32 nic_cfg, led_cfg;
a9daf367 13636 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
7d0c41ef 13637 int eeprom_phy_serdes = 0;
1da177e4
LT
13638
13639 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13640 tp->nic_sram_data_cfg = nic_cfg;
13641
13642 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13643 ver >>= NIC_SRAM_DATA_VER_SHIFT;
6ff6f81d
MC
13644 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13645 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13646 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
1da177e4
LT
13647 (ver > 0) && (ver < 0x100))
13648 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13649
a9daf367
MC
13650 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13651 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13652
1da177e4
LT
13653 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13654 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13655 eeprom_phy_serdes = 1;
13656
13657 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13658 if (nic_phy_id != 0) {
13659 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13660 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13661
13662 eeprom_phy_id = (id1 >> 16) << 10;
13663 eeprom_phy_id |= (id2 & 0xfc00) << 16;
13664 eeprom_phy_id |= (id2 & 0x03ff) << 0;
13665 } else
13666 eeprom_phy_id = 0;
13667
7d0c41ef 13668 tp->phy_id = eeprom_phy_id;
747e8f8b 13669 if (eeprom_phy_serdes) {
63c3a66f 13670 if (!tg3_flag(tp, 5705_PLUS))
f07e9af3 13671 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
a50d0796 13672 else
f07e9af3 13673 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
747e8f8b 13674 }
7d0c41ef 13675
63c3a66f 13676 if (tg3_flag(tp, 5750_PLUS))
1da177e4
LT
13677 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13678 SHASTA_EXT_LED_MODE_MASK);
cbf46853 13679 else
1da177e4
LT
13680 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13681
13682 switch (led_cfg) {
13683 default:
13684 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13685 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13686 break;
13687
13688 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13689 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13690 break;
13691
13692 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13693 tp->led_ctrl = LED_CTRL_MODE_MAC;
9ba27794
MC
13694
13695 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
13696 * read from some older 5700/5701 bootcode.
13697 */
13698 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13699 ASIC_REV_5700 ||
13700 GET_ASIC_REV(tp->pci_chip_rev_id) ==
13701 ASIC_REV_5701)
13702 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13703
1da177e4
LT
13704 break;
13705
13706 case SHASTA_EXT_LED_SHARED:
13707 tp->led_ctrl = LED_CTRL_MODE_SHARED;
13708 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13709 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13710 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13711 LED_CTRL_MODE_PHY_2);
13712 break;
13713
13714 case SHASTA_EXT_LED_MAC:
13715 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13716 break;
13717
13718 case SHASTA_EXT_LED_COMBO:
13719 tp->led_ctrl = LED_CTRL_MODE_COMBO;
13720 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13721 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13722 LED_CTRL_MODE_PHY_2);
13723 break;
13724
855e1111 13725 }
1da177e4
LT
13726
13727 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13728 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13729 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13730 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13731
b2a5c19c
MC
13732 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13733 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
5f60891b 13734
9d26e213 13735 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
63c3a66f 13736 tg3_flag_set(tp, EEPROM_WRITE_PROT);
9d26e213
MC
13737 if ((tp->pdev->subsystem_vendor ==
13738 PCI_VENDOR_ID_ARIMA) &&
13739 (tp->pdev->subsystem_device == 0x205a ||
13740 tp->pdev->subsystem_device == 0x2063))
63c3a66f 13741 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
9d26e213 13742 } else {
63c3a66f
JP
13743 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13744 tg3_flag_set(tp, IS_NIC);
9d26e213 13745 }
1da177e4
LT
13746
13747 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
63c3a66f
JP
13748 tg3_flag_set(tp, ENABLE_ASF);
13749 if (tg3_flag(tp, 5750_PLUS))
13750 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
1da177e4 13751 }
b2b98d4a
MC
13752
13753 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
63c3a66f
JP
13754 tg3_flag(tp, 5750_PLUS))
13755 tg3_flag_set(tp, ENABLE_APE);
b2b98d4a 13756
f07e9af3 13757 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
a85feb8c 13758 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
63c3a66f 13759 tg3_flag_clear(tp, WOL_CAP);
1da177e4 13760
63c3a66f 13761 if (tg3_flag(tp, WOL_CAP) &&
6fdbab9d 13762 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
63c3a66f 13763 tg3_flag_set(tp, WOL_ENABLE);
6fdbab9d
RW
13764 device_set_wakeup_enable(&tp->pdev->dev, true);
13765 }
0527ba35 13766
1da177e4 13767 if (cfg2 & (1 << 17))
f07e9af3 13768 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
1da177e4
LT
13769
13770 /* SerDes signal pre-emphasis in register 0x590 is set by the
13771 * bootcode if bit 18 is set. */
13772 if (cfg2 & (1 << 18))
f07e9af3 13773 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
8ed5d97e 13774
63c3a66f
JP
13775 if ((tg3_flag(tp, 57765_PLUS) ||
13776 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13777 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
6833c043 13778 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
f07e9af3 13779 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
6833c043 13780
63c3a66f 13781 if (tg3_flag(tp, PCI_EXPRESS) &&
8c69b1e7 13782 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
63c3a66f 13783 !tg3_flag(tp, 57765_PLUS)) {
8ed5d97e
MC
13784 u32 cfg3;
13785
13786 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13787 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
63c3a66f 13788 tg3_flag_set(tp, ASPM_WORKAROUND);
8ed5d97e 13789 }
a9daf367 13790
14417063 13791 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
63c3a66f 13792 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
a9daf367 13793 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
63c3a66f 13794 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
a9daf367 13795 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
63c3a66f 13796 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
1da177e4 13797 }
05ac4cb7 13798done:
63c3a66f 13799 if (tg3_flag(tp, WOL_CAP))
43067ed8 13800 device_set_wakeup_enable(&tp->pdev->dev,
63c3a66f 13801 tg3_flag(tp, WOL_ENABLE));
43067ed8
RW
13802 else
13803 device_set_wakeup_capable(&tp->pdev->dev, false);
7d0c41ef
MC
13804}
13805
229b1ad1 13806static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
b2a5c19c
MC
13807{
13808 int i;
13809 u32 val;
13810
13811 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13812 tw32(OTP_CTRL, cmd);
13813
13814 /* Wait for up to 1 ms for command to execute. */
13815 for (i = 0; i < 100; i++) {
13816 val = tr32(OTP_STATUS);
13817 if (val & OTP_STATUS_CMD_DONE)
13818 break;
13819 udelay(10);
13820 }
13821
13822 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13823}
13824
13825/* Read the gphy configuration from the OTP region of the chip. The gphy
13826 * configuration is a 32-bit value that straddles the alignment boundary.
13827 * We do two 32-bit reads and then shift and merge the results.
13828 */
229b1ad1 13829static u32 tg3_read_otp_phycfg(struct tg3 *tp)
b2a5c19c
MC
13830{
13831 u32 bhalf_otp, thalf_otp;
13832
13833 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13834
13835 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13836 return 0;
13837
13838 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13839
13840 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13841 return 0;
13842
13843 thalf_otp = tr32(OTP_READ_DATA);
13844
13845 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13846
13847 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13848 return 0;
13849
13850 bhalf_otp = tr32(OTP_READ_DATA);
13851
13852 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13853}
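/* Worked example of the merge above (illustrative values): with
 * thalf_otp = 0x1234abcd and bhalf_otp = 0x5678ef01, the result is
 * (0xabcd << 16) | 0x5678 = 0xabcd5678, i.e. the low half of the first
 * word glued to the high half of the second.
 */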
13854
229b1ad1 13855static void tg3_phy_init_link_config(struct tg3 *tp)
e256f8a3 13856{
202ff1c2 13857 u32 adv = ADVERTISED_Autoneg;
e256f8a3
MC
13858
13859 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13860 adv |= ADVERTISED_1000baseT_Half |
13861 ADVERTISED_1000baseT_Full;
13862
13863 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13864 adv |= ADVERTISED_100baseT_Half |
13865 ADVERTISED_100baseT_Full |
13866 ADVERTISED_10baseT_Half |
13867 ADVERTISED_10baseT_Full |
13868 ADVERTISED_TP;
13869 else
13870 adv |= ADVERTISED_FIBRE;
13871
13872 tp->link_config.advertising = adv;
e740522e
MC
13873 tp->link_config.speed = SPEED_UNKNOWN;
13874 tp->link_config.duplex = DUPLEX_UNKNOWN;
e256f8a3 13875 tp->link_config.autoneg = AUTONEG_ENABLE;
e740522e
MC
13876 tp->link_config.active_speed = SPEED_UNKNOWN;
13877 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
34655ad6
MC
13878
13879 tp->old_link = -1;
e256f8a3
MC
13880}
13881
229b1ad1 13882static int tg3_phy_probe(struct tg3 *tp)
7d0c41ef
MC
13883{
13884 u32 hw_phy_id_1, hw_phy_id_2;
13885 u32 hw_phy_id, hw_phy_id_masked;
13886 int err;
1da177e4 13887
e256f8a3 13888 /* flow control autonegotiation is default behavior */
63c3a66f 13889 tg3_flag_set(tp, PAUSE_AUTONEG);
e256f8a3
MC
13890 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13891
8151ad57
MC
13892 if (tg3_flag(tp, ENABLE_APE)) {
13893 switch (tp->pci_fn) {
13894 case 0:
13895 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
13896 break;
13897 case 1:
13898 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
13899 break;
13900 case 2:
13901 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
13902 break;
13903 case 3:
13904 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
13905 break;
13906 }
13907 }
13908
63c3a66f 13909 if (tg3_flag(tp, USE_PHYLIB))
b02fd9e3
MC
13910 return tg3_phy_init(tp);
13911
1da177e4 13912 /* Reading the PHY ID register can conflict with ASF
877d0310 13913 * firmware access to the PHY hardware.
1da177e4
LT
13914 */
13915 err = 0;
63c3a66f 13916 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
79eb6904 13917 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
1da177e4
LT
13918 } else {
13919 /* Now read the physical PHY_ID from the chip and verify
13920 * that it is sane. If it doesn't look good, we fall back
13921 * to the PHY_ID found in the eeprom area, and failing that
13922 * to the hard-coded subsystem-ID table.
13923 */
13924 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13925 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13926
13927 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
13928 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13929 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
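		/* Bit layout of the packed id: PHYSID1[15:0] lands in
		 * bits 25:10, PHYSID2[15:10] in bits 31:26 and
		 * PHYSID2[9:0] in bits 9:0.
		 */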
13930
79eb6904 13931 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
1da177e4
LT
13932 }
13933
79eb6904 13934 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
1da177e4 13935 tp->phy_id = hw_phy_id;
79eb6904 13936 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
f07e9af3 13937 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
da6b2d01 13938 else
f07e9af3 13939 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
1da177e4 13940 } else {
79eb6904 13941 if (tp->phy_id != TG3_PHY_ID_INVALID) {
7d0c41ef
MC
13942 /* Do nothing, phy ID already set up in
13943 * tg3_get_eeprom_hw_cfg().
13944 */
1da177e4
LT
13945 } else {
13946 struct subsys_tbl_ent *p;
13947
13948 /* No eeprom signature? Try the hardcoded
13949 * subsys device table.
13950 */
24daf2b0 13951 p = tg3_lookup_by_subsys(tp);
1da177e4
LT
13952 if (!p)
13953 return -ENODEV;
13954
13955 tp->phy_id = p->phy_id;
13956 if (!tp->phy_id ||
79eb6904 13957 tp->phy_id == TG3_PHY_ID_BCM8002)
f07e9af3 13958 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
1da177e4
LT
13959 }
13960 }
13961
a6b68dab 13962 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
5baa5e9a
MC
13963 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13964 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13965 (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
a6b68dab
MC
13966 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13967 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13968 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
52b02d04
MC
13969 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13970
e256f8a3
MC
13971 tg3_phy_init_link_config(tp);
13972
f07e9af3 13973 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
63c3a66f
JP
13974 !tg3_flag(tp, ENABLE_APE) &&
13975 !tg3_flag(tp, ENABLE_ASF)) {
e2bf73e7 13976 u32 bmsr, dummy;
1da177e4
LT
13977
13978 tg3_readphy(tp, MII_BMSR, &bmsr);
13979 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13980 (bmsr & BMSR_LSTATUS))
13981 goto skip_phy_reset;
6aa20a22 13982
1da177e4
LT
13983 err = tg3_phy_reset(tp);
13984 if (err)
13985 return err;
13986
42b64a45 13987 tg3_phy_set_wirespeed(tp);
1da177e4 13988
e2bf73e7 13989 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
42b64a45
MC
13990 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13991 tp->link_config.flowctrl);
1da177e4
LT
13992
13993 tg3_writephy(tp, MII_BMCR,
13994 BMCR_ANENABLE | BMCR_ANRESTART);
13995 }
1da177e4
LT
13996 }
13997
13998skip_phy_reset:
79eb6904 13999 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1da177e4
LT
14000 err = tg3_init_5401phy_dsp(tp);
14001 if (err)
14002 return err;
1da177e4 14003
1da177e4
LT
14004 err = tg3_init_5401phy_dsp(tp);
14005 }
14006
1da177e4
LT
14007 return err;
14008}
14009
static void tg3_read_vpd(struct tg3 *tp)
{
        u8 *vpd_data;
        unsigned int block_end, rosize, len;
        u32 vpdlen;
        int j, i = 0;

        vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
        if (!vpd_data)
                goto out_no_vpd;

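        /* The VPD blob is a chain of large-resource tags; everything of
         * interest here (part number and firmware strings) lives inside
         * the read-only data tag, so locate it first and bound every
         * keyword search by its size.
         */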
        i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
        if (i < 0)
                goto out_not_found;

        rosize = pci_vpd_lrdt_size(&vpd_data[i]);
        block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
        i += PCI_VPD_LRDT_TAG_SIZE;

        if (block_end > vpdlen)
                goto out_not_found;

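        /* Only when the MFR_ID keyword reads "1028" (Dell's PCI vendor
         * ID rendered as ASCII) is the VENDOR0 field below interpreted
         * as a boot code firmware version string.
         */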
        j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
                                      PCI_VPD_RO_KEYWORD_MFR_ID);
        if (j > 0) {
                len = pci_vpd_info_field_size(&vpd_data[j]);

                j += PCI_VPD_INFO_FLD_HDR_SIZE;
                if (j + len > block_end || len != 4 ||
                    memcmp(&vpd_data[j], "1028", 4))
                        goto partno;

                j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
                                              PCI_VPD_RO_KEYWORD_VENDOR0);
                if (j < 0)
                        goto partno;

                len = pci_vpd_info_field_size(&vpd_data[j]);

                j += PCI_VPD_INFO_FLD_HDR_SIZE;
                if (j + len > block_end)
                        goto partno;

                memcpy(tp->fw_ver, &vpd_data[j], len);
                strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
        }

partno:
        i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
                                      PCI_VPD_RO_KEYWORD_PARTNO);
        if (i < 0)
                goto out_not_found;

        len = pci_vpd_info_field_size(&vpd_data[i]);

        i += PCI_VPD_INFO_FLD_HDR_SIZE;
        if (len > TG3_BPN_SIZE ||
            (len + i) > vpdlen)
                goto out_not_found;

        memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
        kfree(vpd_data);
        if (tp->board_part_number[0])
                return;

out_no_vpd:
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
                if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
                        strcpy(tp->board_part_number, "BCM5717");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
                        strcpy(tp->board_part_number, "BCM5718");
                else
                        goto nomatch;
        } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
                if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
                        strcpy(tp->board_part_number, "BCM57780");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
                        strcpy(tp->board_part_number, "BCM57760");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
                        strcpy(tp->board_part_number, "BCM57790");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
                        strcpy(tp->board_part_number, "BCM57788");
                else
                        goto nomatch;
        } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
                if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
                        strcpy(tp->board_part_number, "BCM57761");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
                        strcpy(tp->board_part_number, "BCM57765");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
                        strcpy(tp->board_part_number, "BCM57781");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
                        strcpy(tp->board_part_number, "BCM57785");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
                        strcpy(tp->board_part_number, "BCM57791");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
                        strcpy(tp->board_part_number, "BCM57795");
                else
                        goto nomatch;
        } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
                if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
                        strcpy(tp->board_part_number, "BCM57762");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
                        strcpy(tp->board_part_number, "BCM57766");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
                        strcpy(tp->board_part_number, "BCM57782");
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
                        strcpy(tp->board_part_number, "BCM57786");
                else
                        goto nomatch;
        } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                strcpy(tp->board_part_number, "BCM95906");
        } else {
nomatch:
                strcpy(tp->board_part_number, "none");
        }
}

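/* A self-consistent firmware image header here is a word whose upper
 * six bits decode to 0x03 (i.e. (word & 0xfc000000) == 0x0c000000)
 * followed immediately by a zero word; anything else is rejected.
 */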
static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
        u32 val;

        if (tg3_nvram_read(tp, offset, &val) ||
            (val & 0xfc000000) != 0x0c000000 ||
            tg3_nvram_read(tp, offset + 4, &val) ||
            val != 0)
                return 0;

        return 1;
}

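/* Bootcode versions come in two layouts: newer images (identified by
 * the header check used above) carry a 16-byte printable version
 * string at an offset stored in the image header, while older images
 * only provide packed major/minor fields in the NVRAM pointer table.
 */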
static void tg3_read_bc_ver(struct tg3 *tp)
{
        u32 val, offset, start, ver_offset;
        int i, dst_off;
        bool newver = false;

        if (tg3_nvram_read(tp, 0xc, &offset) ||
            tg3_nvram_read(tp, 0x4, &start))
                return;

        offset = tg3_nvram_logical_addr(tp, offset);

        if (tg3_nvram_read(tp, offset, &val))
                return;

        if ((val & 0xfc000000) == 0x0c000000) {
                if (tg3_nvram_read(tp, offset + 4, &val))
                        return;

                if (val == 0)
                        newver = true;
        }

        dst_off = strlen(tp->fw_ver);

        if (newver) {
                if (TG3_VER_SIZE - dst_off < 16 ||
                    tg3_nvram_read(tp, offset + 8, &ver_offset))
                        return;

                offset = offset + ver_offset - start;
                for (i = 0; i < 16; i += 4) {
                        __be32 v;
                        if (tg3_nvram_read_be32(tp, offset + i, &v))
                                return;

                        memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
                }
        } else {
                u32 major, minor;

                if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
                        return;

                major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
                        TG3_NVM_BCVER_MAJSFT;
                minor = ver_offset & TG3_NVM_BCVER_MINMSK;
                snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
                         "v%d.%02d", major, minor);
        }
}

static void tg3_read_hwsb_ver(struct tg3 *tp)
{
        u32 val, major, minor;

        /* Use native endian representation */
        if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
                return;

        major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
                TG3_NVM_HWSB_CFG1_MAJSFT;
        minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
                TG3_NVM_HWSB_CFG1_MINSFT;

        snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}

static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
        u32 offset, major, minor, build;

        strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

        if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
                return;

        switch (val & TG3_EEPROM_SB_REVISION_MASK) {
        case TG3_EEPROM_SB_REVISION_0:
                offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
                break;
        case TG3_EEPROM_SB_REVISION_2:
                offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
                break;
        case TG3_EEPROM_SB_REVISION_3:
                offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
                break;
        case TG3_EEPROM_SB_REVISION_4:
                offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
                break;
        case TG3_EEPROM_SB_REVISION_5:
                offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
                break;
        case TG3_EEPROM_SB_REVISION_6:
                offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
                break;
        default:
                return;
        }

        if (tg3_nvram_read(tp, offset, &val))
                return;

        build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
                TG3_EEPROM_SB_EDH_BLD_SHFT;
        major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
                TG3_EEPROM_SB_EDH_MAJ_SHFT;
        minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;

        if (minor > 99 || build > 26)
                return;

        offset = strlen(tp->fw_ver);
        snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
                 " v%d.%02d", major, minor);

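        /* A non-zero build number is rendered as a single trailing
         * letter, 'a' for build 1 through 'z' for build 26, which is
         * why anything above 26 was rejected above.
         */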
        if (build > 0) {
                offset = strlen(tp->fw_ver);
                if (offset < TG3_VER_SIZE - 1)
                        tp->fw_ver[offset] = 'a' + build - 1;
        }
}

static void tg3_read_mgmtfw_ver(struct tg3 *tp)
{
        u32 val, offset, start;
        int i, vlen;

        for (offset = TG3_NVM_DIR_START;
             offset < TG3_NVM_DIR_END;
             offset += TG3_NVM_DIRENT_SIZE) {
                if (tg3_nvram_read(tp, offset, &val))
                        return;

                if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
                        break;
        }

        if (offset == TG3_NVM_DIR_END)
                return;

        if (!tg3_flag(tp, 5705_PLUS))
                start = 0x08000000;
        else if (tg3_nvram_read(tp, offset - 4, &start))
                return;

        if (tg3_nvram_read(tp, offset + 4, &offset) ||
            !tg3_fw_img_is_valid(tp, offset) ||
            tg3_nvram_read(tp, offset + 8, &val))
                return;

        offset += val - start;

        vlen = strlen(tp->fw_ver);

        tp->fw_ver[vlen++] = ',';
        tp->fw_ver[vlen++] = ' ';

        for (i = 0; i < 4; i++) {
                __be32 v;
                if (tg3_nvram_read_be32(tp, offset, &v))
                        return;

                offset += sizeof(v);

                if (vlen > TG3_VER_SIZE - sizeof(v)) {
                        memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
                        break;
                }

                memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
                vlen += sizeof(v);
        }
}

static void tg3_probe_ncsi(struct tg3 *tp)
{
        u32 apedata;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return;

        if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
                tg3_flag_set(tp, APE_HAS_NCSI);
}

static void tg3_read_dash_ver(struct tg3 *tp)
{
        int vlen;
        u32 apedata;
        char *fwtype;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

        if (tg3_flag(tp, APE_HAS_NCSI))
                fwtype = "NCSI";
        else
                fwtype = "DASH";

        vlen = strlen(tp->fw_ver);

        snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
                 fwtype,
                 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
                 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
                 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
                 (apedata & APE_FW_VERSION_BLDMSK));
}

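/* The exported firmware version string is assembled in stages: any
 * VPD-provided version first, then the bootcode/selfboot/hw-selfboot
 * version from NVRAM, and finally the management firmware (NCSI/DASH
 * via the APE, or ASF) when no VPD version was present.
 */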
static void tg3_read_fw_ver(struct tg3 *tp)
{
        u32 val;
        bool vpd_vers = false;

        if (tp->fw_ver[0] != 0)
                vpd_vers = true;

        if (tg3_flag(tp, NO_NVRAM)) {
                strcat(tp->fw_ver, "sb");
                return;
        }

        if (tg3_nvram_read(tp, 0, &val))
                return;

        if (val == TG3_EEPROM_MAGIC)
                tg3_read_bc_ver(tp);
        else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
                tg3_read_sb_ver(tp, val);
        else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
                tg3_read_hwsb_ver(tp);

        if (tg3_flag(tp, ENABLE_ASF)) {
                if (tg3_flag(tp, ENABLE_APE)) {
                        tg3_probe_ncsi(tp);
                        if (!vpd_vers)
                                tg3_read_dash_ver(tp);
                } else if (!vpd_vers) {
                        tg3_read_mgmtfw_ver(tp);
                }
        }

        tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}

static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
        if (tg3_flag(tp, LRG_PROD_RING_CAP))
                return TG3_RX_RET_MAX_SIZE_5717;
        else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
                return TG3_RX_RET_MAX_SIZE_5700;
        else
                return TG3_RX_RET_MAX_SIZE_5705;
}

static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
        { },
};

static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
        struct pci_dev *peer;
        unsigned int func, devnr = tp->pdev->devfn & ~7;

        for (func = 0; func < 8; func++) {
                peer = pci_get_slot(tp->pdev->bus, devnr | func);
                if (peer && peer != tp->pdev)
                        break;
                pci_dev_put(peer);
        }
        /* 5704 can be configured in single-port mode, set peer to
         * tp->pdev in that case.
         */
        if (!peer) {
                peer = tp->pdev;
                return peer;
        }

        /*
         * We don't need to keep the refcount elevated; there's no way
         * to remove one half of this device without removing the other
         */
        pci_dev_put(peer);

        return peer;
}

static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
        tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
                u32 reg;

                /* All devices that use the alternate
                 * ASIC REV location have a CPMU.
                 */
                tg3_flag_set(tp, CPMU_PRESENT);

                if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
                        reg = TG3PCI_GEN2_PRODID_ASICREV;
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
                        reg = TG3PCI_GEN15_PRODID_ASICREV;
                else
                        reg = TG3PCI_PRODID_ASICREV;

                pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
        }

        /* Wrong chip ID in 5752 A0. This code can be removed later
         * as A0 is not in production.
         */
        if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
                tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

        if (tp->pci_chip_rev_id == CHIPREV_ID_5717_C0)
                tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
                tg3_flag_set(tp, 5717_PLUS);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
                tg3_flag_set(tp, 57765_CLASS);

        if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
                tg3_flag_set(tp, 57765_PLUS);

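        /* The generation flags below nest: 57765_PLUS implies
         * 5755_PLUS, which implies 5750_PLUS, which implies 5705_PLUS,
         * so one flag test covers a chip family plus everything newer.
         */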
        /* Intentionally exclude ASIC_REV_5906 */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
            tg3_flag(tp, 57765_PLUS))
                tg3_flag_set(tp, 5755_PLUS);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
                tg3_flag_set(tp, 5780_CLASS);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
            tg3_flag(tp, 5755_PLUS) ||
            tg3_flag(tp, 5780_CLASS))
                tg3_flag_set(tp, 5750_PLUS);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
            tg3_flag(tp, 5750_PLUS))
                tg3_flag_set(tp, 5705_PLUS);
}

static bool tg3_10_100_only_device(struct tg3 *tp,
                                   const struct pci_device_id *ent)
{
        u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;

        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
             (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
            (tp->phy_flags & TG3_PHYFLG_IS_FET))
                return true;

        if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                        if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
                                return true;
                } else {
                        return true;
                }
        }

        return false;
}

static int tg3_get_invariants(struct tg3 *tp,
                              const struct pci_device_id *ent)
{
        u32 misc_ctrl_reg;
        u32 pci_state_reg, grc_misc_cfg;
        u32 val;
        u16 pci_cmd;
        int err;

        /* Force memory write invalidate off.  If we leave it on,
         * then on 5700_BX chips we have to enable a workaround.
         * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
         * to match the cacheline size.  The Broadcom driver has this
         * workaround but turns MWI off all the time, so it never gets
         * used, which seems to suggest the workaround is insufficient.
         */
        pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
        pci_cmd &= ~PCI_COMMAND_INVALIDATE;
        pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

        /* Important! -- Make sure register accesses are byteswapped
         * correctly.  Also, for those chips that require it, make
         * sure that indirect register accesses are enabled before
         * the first operation.
         */
        pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
                              &misc_ctrl_reg);
        tp->misc_host_ctrl |= (misc_ctrl_reg &
                               MISC_HOST_CTRL_CHIPREV);
        pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
                               tp->misc_host_ctrl);

        tg3_detect_asic_rev(tp, misc_ctrl_reg);

        /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
         * we need to disable memory and use config. cycles
         * only to access all registers. The 5702/03 chips
         * can mistakenly decode the special cycles from the
         * ICH chipsets as memory write cycles, causing corruption
         * of register and memory space. Only certain ICH bridges
         * will drive special cycles with non-zero data during the
         * address phase which can fall within the 5703's address
         * range. This is not an ICH bug as the PCI spec allows
         * non-zero address during special cycles. However, only
         * these ICH bridges are known to drive non-zero addresses
         * during special cycles.
         *
         * Since special cycles do not cross PCI bridges, we only
         * enable this workaround if the 5703 is on the secondary
         * bus of these ICH bridges.
         */
        if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
            (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
                static struct tg3_dev_id {
                        u32 vendor;
                        u32 device;
                        u32 rev;
                } ich_chipsets[] = {
                        { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
                          PCI_ANY_ID },
                        { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
                          PCI_ANY_ID },
                        { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
                          0xa },
                        { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
                          PCI_ANY_ID },
                        { },
                };
                struct tg3_dev_id *pci_id = &ich_chipsets[0];
                struct pci_dev *bridge = NULL;

                while (pci_id->vendor != 0) {
                        bridge = pci_get_device(pci_id->vendor, pci_id->device,
                                                bridge);
                        if (!bridge) {
                                pci_id++;
                                continue;
                        }
                        if (pci_id->rev != PCI_ANY_ID) {
                                if (bridge->revision > pci_id->rev)
                                        continue;
                        }
                        if (bridge->subordinate &&
                            (bridge->subordinate->number ==
                             tp->pdev->bus->number)) {
                                tg3_flag_set(tp, ICH_WORKAROUND);
                                pci_dev_put(bridge);
                                break;
                        }
                }
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                static struct tg3_dev_id {
                        u32 vendor;
                        u32 device;
                } bridge_chipsets[] = {
                        { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
                        { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
                        { },
                };
                struct tg3_dev_id *pci_id = &bridge_chipsets[0];
                struct pci_dev *bridge = NULL;

                while (pci_id->vendor != 0) {
                        bridge = pci_get_device(pci_id->vendor,
                                                pci_id->device,
                                                bridge);
                        if (!bridge) {
                                pci_id++;
                                continue;
                        }
                        if (bridge->subordinate &&
                            (bridge->subordinate->number <=
                             tp->pdev->bus->number) &&
                            (bridge->subordinate->busn_res.end >=
                             tp->pdev->bus->number)) {
                                tg3_flag_set(tp, 5701_DMA_BUG);
                                pci_dev_put(bridge);
                                break;
                        }
                }
        }

        /* The EPB bridge inside 5714, 5715, and 5780 cannot support
         * DMA addresses > 40-bit. This bridge may have other additional
         * 57xx devices behind it in some 4-port NIC designs for example.
         * Any tg3 device found behind the bridge will also need the 40-bit
         * DMA workaround.
         */
        if (tg3_flag(tp, 5780_CLASS)) {
                tg3_flag_set(tp, 40BIT_DMA_BUG);
                tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
        } else {
                struct pci_dev *bridge = NULL;

                do {
                        bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
                                                PCI_DEVICE_ID_SERVERWORKS_EPB,
                                                bridge);
                        if (bridge && bridge->subordinate &&
                            (bridge->subordinate->number <=
                             tp->pdev->bus->number) &&
                            (bridge->subordinate->busn_res.end >=
                             tp->pdev->bus->number)) {
                                tg3_flag_set(tp, 40BIT_DMA_BUG);
                                pci_dev_put(bridge);
                                break;
                        }
                } while (bridge);
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
                tp->pdev_peer = tg3_find_peer(tp);

        /* Determine TSO capabilities */
        if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
                ; /* Do nothing. HW bug. */
        else if (tg3_flag(tp, 57765_PLUS))
                tg3_flag_set(tp, HW_TSO_3);
        else if (tg3_flag(tp, 5755_PLUS) ||
                 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
                tg3_flag_set(tp, HW_TSO_2);
        else if (tg3_flag(tp, 5750_PLUS)) {
                tg3_flag_set(tp, HW_TSO_1);
                tg3_flag_set(tp, TSO_BUG);
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
                    tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
                        tg3_flag_clear(tp, TSO_BUG);
        } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
                   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
                   tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
                tg3_flag_set(tp, TSO_BUG);
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
                        tp->fw_needed = FIRMWARE_TG3TSO5;
                else
                        tp->fw_needed = FIRMWARE_TG3TSO;
        }
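        /* Summary: 57765+ parts use the third-generation TSO engine,
         * 5755+/5906 the second, other 5750+ parts the first (with an
         * errata flag that is cleared only for 5750 C2 and newer), and
         * older silicon falls back to loading a TSO firmware image.
         * 5719 A0 gets no TSO at all because of a hardware bug.
         */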

        /* Selectively allow TSO based on operating conditions */
        if (tg3_flag(tp, HW_TSO_1) ||
            tg3_flag(tp, HW_TSO_2) ||
            tg3_flag(tp, HW_TSO_3) ||
            tp->fw_needed) {
                /* For firmware TSO, assume ASF is disabled.
                 * We'll disable TSO later if we discover ASF
                 * is enabled in tg3_get_eeprom_hw_cfg().
                 */
                tg3_flag_set(tp, TSO_CAPABLE);
        } else {
                tg3_flag_clear(tp, TSO_CAPABLE);
                tg3_flag_clear(tp, TSO_BUG);
                tp->fw_needed = NULL;
        }

        if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
                tp->fw_needed = FIRMWARE_TG3;

        tp->irq_max = 1;

        if (tg3_flag(tp, 5750_PLUS)) {
                tg3_flag_set(tp, SUPPORT_MSI);
                if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
                    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
                     tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
                     tp->pdev_peer == tp->pdev))
                        tg3_flag_clear(tp, SUPPORT_MSI);

                if (tg3_flag(tp, 5755_PLUS) ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                        tg3_flag_set(tp, 1SHOT_MSI);
                }

                if (tg3_flag(tp, 57765_PLUS)) {
                        tg3_flag_set(tp, SUPPORT_MSIX);
                        tp->irq_max = TG3_IRQ_MAX_VECS;
                }
        }

        tp->txq_max = 1;
        tp->rxq_max = 1;
        if (tp->irq_max > 1) {
                tp->rxq_max = TG3_RSS_MAX_NUM_QS;
                tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);

                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
                        tp->txq_max = tp->irq_max - 1;
        }

        if (tg3_flag(tp, 5755_PLUS) ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
                tg3_flag_set(tp, SHORT_DMA_BUG);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
                tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
                tg3_flag_set(tp, LRG_PROD_RING_CAP);

        if (tg3_flag(tp, 57765_PLUS) &&
            tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
                tg3_flag_set(tp, USE_JUMBO_BDFLAG);

        if (!tg3_flag(tp, 5705_PLUS) ||
            tg3_flag(tp, 5780_CLASS) ||
            tg3_flag(tp, USE_JUMBO_BDFLAG))
                tg3_flag_set(tp, JUMBO_CAPABLE);

        pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
                              &pci_state_reg);

        if (pci_is_pcie(tp->pdev)) {
                u16 lnkctl;

                tg3_flag_set(tp, PCI_EXPRESS);

                pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
                if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
                            ASIC_REV_5906) {
                                tg3_flag_clear(tp, HW_TSO_2);
                                tg3_flag_clear(tp, TSO_CAPABLE);
                        }
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
                            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
                            tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
                            tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
                                tg3_flag_set(tp, CLKREQ_BUG);
                } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
                        tg3_flag_set(tp, L1PLLPD_EN);
                }
        } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
                /* BCM5785 devices are effectively PCIe devices, and should
                 * follow PCIe codepaths, but do not have a PCIe capabilities
                 * section.
                 */
                tg3_flag_set(tp, PCI_EXPRESS);
        } else if (!tg3_flag(tp, 5705_PLUS) ||
                   tg3_flag(tp, 5780_CLASS)) {
                tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
                if (!tp->pcix_cap) {
                        dev_err(&tp->pdev->dev,
                                "Cannot find PCI-X capability, aborting\n");
                        return -EIO;
                }

                if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
                        tg3_flag_set(tp, PCIX_MODE);
        }

        /* If we have an AMD 762 or VIA K8T800 chipset, write
         * reordering to the mailbox registers done by the host
         * controller can cause major troubles.  We read back from
         * every mailbox register write to force the writes to be
         * posted to the chip in order.
         */
        if (pci_dev_present(tg3_write_reorder_chipsets) &&
            !tg3_flag(tp, PCI_EXPRESS))
                tg3_flag_set(tp, MBOX_WRITE_REORDER);

        pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
                             &tp->pci_cacheline_sz);
        pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
                             &tp->pci_lat_timer);
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
            tp->pci_lat_timer < 64) {
                tp->pci_lat_timer = 64;
                pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
                                      tp->pci_lat_timer);
        }

        /* Important! -- It is critical that the PCI-X hw workaround
         * situation is decided before the first MMIO register access.
         */
        if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
                /* 5700 BX chips need to have their TX producer index
                 * mailboxes written twice to workaround a bug.
                 */
                tg3_flag_set(tp, TXD_MBOX_HWBUG);

                /* If we are in PCI-X mode, enable register write workaround.
                 *
                 * The workaround is to use indirect register accesses
                 * for all chip writes not to mailbox registers.
                 */
                if (tg3_flag(tp, PCIX_MODE)) {
                        u32 pm_reg;

                        tg3_flag_set(tp, PCIX_TARGET_HWBUG);

                        /* The chip can have its power management PCI config
                         * space registers clobbered due to this bug.
                         * So explicitly force the chip into D0 here.
                         */
                        pci_read_config_dword(tp->pdev,
                                              tp->pm_cap + PCI_PM_CTRL,
                                              &pm_reg);
                        pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
                        pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
                        pci_write_config_dword(tp->pdev,
                                               tp->pm_cap + PCI_PM_CTRL,
                                               pm_reg);

                        /* Also, force SERR#/PERR# in PCI command. */
                        pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
                        pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
                        pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
                }
        }

        if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
                tg3_flag_set(tp, PCI_HIGH_SPEED);
        if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
                tg3_flag_set(tp, PCI_32BIT);

        /* Chip-specific fixup from Broadcom driver */
        if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
            (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
                pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
                pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
        }

        /* Default fast path register access methods */
        tp->read32 = tg3_read32;
        tp->write32 = tg3_write32;
        tp->read32_mbox = tg3_read32;
        tp->write32_mbox = tg3_write32;
        tp->write32_tx_mbox = tg3_write32;
        tp->write32_rx_mbox = tg3_write32;

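        /* All register and mailbox accessors are function pointers, so
         * the chip-specific workarounds below are selected once at
         * probe time instead of being re-tested on every access.
         */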
        /* Various workaround register access methods */
        if (tg3_flag(tp, PCIX_TARGET_HWBUG))
                tp->write32 = tg3_write_indirect_reg32;
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
                 (tg3_flag(tp, PCI_EXPRESS) &&
                  tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
                /*
                 * Back to back register writes can cause problems on these
                 * chips, the workaround is to read back all reg writes
                 * except those to mailbox regs.
                 *
                 * See tg3_write_indirect_reg32().
                 */
                tp->write32 = tg3_write_flush_reg32;
        }

        if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
                tp->write32_tx_mbox = tg3_write32_tx_mbox;
                if (tg3_flag(tp, MBOX_WRITE_REORDER))
                        tp->write32_rx_mbox = tg3_write_flush_reg32;
        }

        if (tg3_flag(tp, ICH_WORKAROUND)) {
                tp->read32 = tg3_read_indirect_reg32;
                tp->write32 = tg3_write_indirect_reg32;
                tp->read32_mbox = tg3_read_indirect_mbox;
                tp->write32_mbox = tg3_write_indirect_mbox;
                tp->write32_tx_mbox = tg3_write_indirect_mbox;
                tp->write32_rx_mbox = tg3_write_indirect_mbox;

                iounmap(tp->regs);
                tp->regs = NULL;

                pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
                pci_cmd &= ~PCI_COMMAND_MEMORY;
                pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
        }
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                tp->read32_mbox = tg3_read32_mbox_5906;
                tp->write32_mbox = tg3_write32_mbox_5906;
                tp->write32_tx_mbox = tg3_write32_mbox_5906;
                tp->write32_rx_mbox = tg3_write32_mbox_5906;
        }

        if (tp->write32 == tg3_write_indirect_reg32 ||
            (tg3_flag(tp, PCIX_MODE) &&
             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
                tg3_flag_set(tp, SRAM_USE_CONFIG);

        /* The memory arbiter has to be enabled in order for SRAM accesses
         * to succeed.  Normally on powerup the tg3 chip firmware will make
         * sure it is enabled, but other entities such as system netboot
         * code might disable it.
         */
        val = tr32(MEMARB_MODE);
        tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

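        /* tp->pci_fn starts out as the PCI function number from devfn,
         * but is then overridden with the function ID the chip itself
         * reports (PCI-X status on 5704/5780-class parts, the CPMU
         * status word on 5717/5719/5720), since the two can disagree.
         */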
        tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            tg3_flag(tp, 5780_CLASS)) {
                if (tg3_flag(tp, PCIX_MODE)) {
                        pci_read_config_dword(tp->pdev,
                                              tp->pcix_cap + PCI_X_STATUS,
                                              &val);
                        tp->pci_fn = val & 0x7;
                }
        } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
                tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
                if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
                    NIC_SRAM_CPMUSTAT_SIG) {
                        tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
                        tp->pci_fn = tp->pci_fn ? 1 : 0;
                }
        } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
                tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
                if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
                    NIC_SRAM_CPMUSTAT_SIG) {
                        tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
                                     TG3_CPMU_STATUS_FSHFT_5719;
                }
        }

        /* Get eeprom hw config before calling tg3_set_power_state().
         * In particular, the TG3_FLAG_IS_NIC flag must be
         * determined before calling tg3_set_power_state() so that
         * we know whether or not to switch out of Vaux power.
         * When the flag is set, it means that GPIO1 is used for eeprom
         * write protect and also implies that it is a LOM where GPIOs
         * are not used to switch power.
         */
        tg3_get_eeprom_hw_cfg(tp);

        if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
                tg3_flag_clear(tp, TSO_CAPABLE);
                tg3_flag_clear(tp, TSO_BUG);
                tp->fw_needed = NULL;
        }

        if (tg3_flag(tp, ENABLE_APE)) {
                /* Allow reads and writes to the
                 * APE register and memory space.
                 */
                pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
                                 PCISTATE_ALLOW_APE_SHMEM_WR |
                                 PCISTATE_ALLOW_APE_PSPACE_WR;
                pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
                                       pci_state_reg);

                tg3_ape_lock_init(tp);
        }

        /* Set up tp->grc_local_ctrl before calling
         * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
         * will bring 5700's external PHY out of reset.
         * It is also used as eeprom write protect on LOMs.
         */
        tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            tg3_flag(tp, EEPROM_WRITE_PROT))
                tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
                                       GRC_LCLCTRL_GPIO_OUTPUT1);
        /* Unused GPIO3 must be driven as output on 5752 because there
         * are no pull-up resistors on unused GPIO pins.
         */
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
                tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
            tg3_flag(tp, 57765_CLASS))
                tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

        if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
                /* Turn off the debug UART. */
                tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
                if (tg3_flag(tp, IS_NIC))
                        /* Keep VMain power. */
                        tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
                                              GRC_LCLCTRL_GPIO_OUTPUT0;
        }

        /* Switch out of Vaux if it is a NIC */
        tg3_pwrsrc_switch_to_vmain(tp);

        /* Derive initial jumbo mode from MTU assigned in
         * ether_setup() via the alloc_etherdev() call
         */
        if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
                tg3_flag_set(tp, JUMBO_RING_ENABLE);

        /* Determine WakeOnLan speed to use. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
            tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
            tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
                tg3_flag_clear(tp, WOL_SPEED_100MB);
        } else {
                tg3_flag_set(tp, WOL_SPEED_100MB);
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
                tp->phy_flags |= TG3_PHYFLG_IS_FET;

        /* A few boards don't want Ethernet@WireSpeed phy feature */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
             (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
             (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
            (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
            (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
                tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;

        if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
            GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
                tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
        if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
                tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;

        if (tg3_flag(tp, 5705_PLUS) &&
            !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
            !tg3_flag(tp, 57765_PLUS)) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                        if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
                            tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
                                tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
                        if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
                                tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
                } else
                        tp->phy_flags |= TG3_PHYFLG_BER_BUG;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
            GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
                tp->phy_otp = tg3_read_otp_phycfg(tp);
                if (tp->phy_otp == 0)
                        tp->phy_otp = TG3_OTP_DEFAULT;
        }

        if (tg3_flag(tp, CPMU_PRESENT))
                tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
        else
                tp->mi_mode = MAC_MI_MODE_BASE;

        tp->coalesce_mode = 0;
        if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
            GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
                tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

        /* Set these bits to enable statistics workaround. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
            tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
            tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
                tp->coalesce_mode |= HOSTCC_MODE_ATTN;
                tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
                tg3_flag_set(tp, USE_PHYLIB);

        err = tg3_mdio_init(tp);
        if (err)
                return err;

        /* Initialize data/descriptor byte/word swapping. */
        val = tr32(GRC_MODE);
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
                val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
                        GRC_MODE_WORD_SWAP_B2HRX_DATA |
                        GRC_MODE_B2HRX_ENABLE |
                        GRC_MODE_HTX2B_ENABLE |
                        GRC_MODE_HOST_STACKUP);
        else
                val &= GRC_MODE_HOST_STACKUP;

        tw32(GRC_MODE, val | tp->grc_mode);

        tg3_switch_clocks(tp);

        /* Clear this out for sanity. */
        tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

        pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
                              &pci_state_reg);
        if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
            !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
                u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);

                if (chiprevid == CHIPREV_ID_5701_A0 ||
                    chiprevid == CHIPREV_ID_5701_B0 ||
                    chiprevid == CHIPREV_ID_5701_B2 ||
                    chiprevid == CHIPREV_ID_5701_B5) {
                        void __iomem *sram_base;

                        /* Write some dummy words into the SRAM status block
                         * area, see if it reads back correctly.  If the return
                         * value is bad, force enable the PCIX workaround.
                         */
                        sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

                        writel(0x00000000, sram_base);
                        writel(0x00000000, sram_base + 4);
                        writel(0xffffffff, sram_base + 4);
                        if (readl(sram_base) != 0x00000000)
                                tg3_flag_set(tp, PCIX_TARGET_HWBUG);
                }
        }

        udelay(50);
        tg3_nvram_init(tp);

        grc_misc_cfg = tr32(GRC_MISC_CFG);
        grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
            (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
                tg3_flag_set(tp, IS_5788);

        if (!tg3_flag(tp, IS_5788) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
                tg3_flag_set(tp, TAGGED_STATUS);
        if (tg3_flag(tp, TAGGED_STATUS)) {
                tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
                                      HOSTCC_MODE_CLRTICK_TXBD);

                tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
                                       tp->misc_host_ctrl);
        }

        /* Preserve the APE MAC_MODE bits */
        if (tg3_flag(tp, ENABLE_APE))
                tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
        else
                tp->mac_mode = 0;

        if (tg3_10_100_only_device(tp, ent))
                tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;

        err = tg3_phy_probe(tp);
        if (err) {
                dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
                /* ... but do not return immediately ... */
                tg3_mdio_fini(tp);
        }

        tg3_read_vpd(tp);
        tg3_read_fw_ver(tp);

        if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
                tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
        } else {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
                        tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
                else
                        tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
        }

        /* 5700 {AX,BX} chips have a broken status block link
         * change bit implementation, so we must use the
         * status register in those cases.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
                tg3_flag_set(tp, USE_LINKCHG_REG);
        else
                tg3_flag_clear(tp, USE_LINKCHG_REG);

        /* The led_ctrl is set during tg3_phy_probe, here we might
         * have to force the link status polling mechanism based
         * upon subsystem IDs.
         */
        if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
            !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
                tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
                tg3_flag_set(tp, USE_LINKCHG_REG);
        }

        /* For all SERDES we poll the MAC status register. */
        if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
                tg3_flag_set(tp, POLL_SERDES);
        else
                tg3_flag_clear(tp, POLL_SERDES);

        tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
        tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
            tg3_flag(tp, PCIX_MODE)) {
                tp->rx_offset = NET_SKB_PAD;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
                tp->rx_copy_thresh = ~(u16)0;
#endif
        }

        tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
        tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
        tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;

        tp->rx_std_max_post = tp->rx_std_ring_mask + 1;

        /* Increment the rx prod index on the rx std ring by at most
         * 8 for these chips to workaround hw errata.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
                tp->rx_std_max_post = 8;

        if (tg3_flag(tp, ASPM_WORKAROUND))
                tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
                                     PCIE_PWR_MGMT_L1_THRESH_MSK;

        return err;
}

#ifdef CONFIG_SPARC
static int tg3_get_macaddr_sparc(struct tg3 *tp)
{
        struct net_device *dev = tp->dev;
        struct pci_dev *pdev = tp->pdev;
        struct device_node *dp = pci_device_to_OF_node(pdev);
        const unsigned char *addr;
        int len;

        addr = of_get_property(dp, "local-mac-address", &len);
        if (addr && len == 6) {
                memcpy(dev->dev_addr, addr, 6);
                memcpy(dev->perm_addr, dev->dev_addr, 6);
                return 0;
        }
        return -ENODEV;
}

static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
        struct net_device *dev = tp->dev;

        memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
        memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
        return 0;
}
#endif

static int tg3_get_device_address(struct tg3 *tp)
{
        struct net_device *dev = tp->dev;
        u32 hi, lo, mac_offset;
        int addr_ok = 0;

#ifdef CONFIG_SPARC
        if (!tg3_get_macaddr_sparc(tp))
                return 0;
#endif

        mac_offset = 0x7c;
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            tg3_flag(tp, 5780_CLASS)) {
                if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
                        mac_offset = 0xcc;
                if (tg3_nvram_lock(tp))
                        tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
                else
                        tg3_nvram_unlock(tp);
        } else if (tg3_flag(tp, 5717_PLUS)) {
                if (tp->pci_fn & 1)
                        mac_offset = 0xcc;
                if (tp->pci_fn > 1)
                        mac_offset += 0x18c;
        } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
                mac_offset = 0x10;

        /* First try to get it from MAC address mailbox. */
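        /* A valid SRAM mailbox entry is tagged with the ASCII
         * signature "HK" (0x484b) in the upper half of the high word.
         */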
        tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
        if ((hi >> 16) == 0x484b) {
                dev->dev_addr[0] = (hi >> 8) & 0xff;
                dev->dev_addr[1] = (hi >> 0) & 0xff;

                tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
                dev->dev_addr[2] = (lo >> 24) & 0xff;
                dev->dev_addr[3] = (lo >> 16) & 0xff;
                dev->dev_addr[4] = (lo >> 8) & 0xff;
                dev->dev_addr[5] = (lo >> 0) & 0xff;

                /* Some old bootcode may report a 0 MAC address in SRAM */
                addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
        }
        if (!addr_ok) {
                /* Next, try NVRAM. */
                if (!tg3_flag(tp, NO_NVRAM) &&
                    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
                    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
                        memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
                        memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
                }
                /* Finally just fetch it out of the MAC control regs. */
                else {
                        hi = tr32(MAC_ADDR_0_HIGH);
                        lo = tr32(MAC_ADDR_0_LOW);

                        dev->dev_addr[5] = lo & 0xff;
                        dev->dev_addr[4] = (lo >> 8) & 0xff;
                        dev->dev_addr[3] = (lo >> 16) & 0xff;
                        dev->dev_addr[2] = (lo >> 24) & 0xff;
                        dev->dev_addr[1] = hi & 0xff;
                        dev->dev_addr[0] = (hi >> 8) & 0xff;
                }
        }

        if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
                if (!tg3_get_default_macaddr_sparc(tp))
                        return 0;
#endif
                return -EINVAL;
        }
        memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
        return 0;
}

#define BOUNDARY_SINGLE_CACHELINE 1
#define BOUNDARY_MULTI_CACHELINE 2

static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
        int cacheline_size;
        u8 byte;
        int goal;

        pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
        if (byte == 0)
                cacheline_size = 1024;
        else
                cacheline_size = (int) byte * 4;

        /* On 5703 and later chips, the boundary bits have no
         * effect.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
            !tg3_flag(tp, PCI_EXPRESS))
                goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
        goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
        goal = BOUNDARY_SINGLE_CACHELINE;
#else
        goal = 0;
#endif
#endif
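
        /* goal is now 0 (leave the boundary bits alone),
         * BOUNDARY_SINGLE_CACHELINE (bursts must not cross a single
         * cache line), or BOUNDARY_MULTI_CACHELINE (a larger
         * multiple), chosen per architecture above.
         */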
15437
63c3a66f 15438 if (tg3_flag(tp, 57765_PLUS)) {
cbf9ca6c
MC
15439 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
15440 goto out;
15441 }
15442
59e6b434
DM
15443 if (!goal)
15444 goto out;
15445
15446 /* PCI controllers on most RISC systems tend to disconnect
15447 * when a device tries to burst across a cache-line boundary.
15448 * Therefore, letting tg3 do so just wastes PCI bandwidth.
15449 *
15450 * Unfortunately, for PCI-E there are only limited
15451 * write-side controls for this, and thus for reads
15452 * we will still get the disconnects. We'll also waste
15453 * these PCI cycles for both read and write for chips
15454 * other than 5700 and 5701 which do not implement the
15455 * boundary bits.
15456 */
63c3a66f 15457 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
59e6b434
DM
15458 switch (cacheline_size) {
15459 case 16:
15460 case 32:
15461 case 64:
15462 case 128:
15463 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15464 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
15465 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
15466 } else {
15467 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15468 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15469 }
15470 break;
15471
15472 case 256:
15473 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
15474 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
15475 break;
15476
15477 default:
15478 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15479 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15480 break;
855e1111 15481 }
63c3a66f 15482 } else if (tg3_flag(tp, PCI_EXPRESS)) {
59e6b434
DM
15483 switch (cacheline_size) {
15484 case 16:
15485 case 32:
15486 case 64:
15487 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15488 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15489 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
15490 break;
15491 }
15492 /* fallthrough */
15493 case 128:
15494 default:
15495 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15496 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
15497 break;
855e1111 15498 }
59e6b434
DM
15499 } else {
15500 switch (cacheline_size) {
15501 case 16:
15502 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15503 val |= (DMA_RWCTRL_READ_BNDRY_16 |
15504 DMA_RWCTRL_WRITE_BNDRY_16);
15505 break;
15506 }
15507 /* fallthrough */
15508 case 32:
15509 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15510 val |= (DMA_RWCTRL_READ_BNDRY_32 |
15511 DMA_RWCTRL_WRITE_BNDRY_32);
15512 break;
15513 }
15514 /* fallthrough */
15515 case 64:
15516 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15517 val |= (DMA_RWCTRL_READ_BNDRY_64 |
15518 DMA_RWCTRL_WRITE_BNDRY_64);
15519 break;
15520 }
15521 /* fallthrough */
15522 case 128:
15523 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15524 val |= (DMA_RWCTRL_READ_BNDRY_128 |
15525 DMA_RWCTRL_WRITE_BNDRY_128);
15526 break;
15527 }
15528 /* fallthrough */
15529 case 256:
15530 val |= (DMA_RWCTRL_READ_BNDRY_256 |
15531 DMA_RWCTRL_WRITE_BNDRY_256);
15532 break;
15533 case 512:
15534 val |= (DMA_RWCTRL_READ_BNDRY_512 |
15535 DMA_RWCTRL_WRITE_BNDRY_512);
15536 break;
15537 case 1024:
15538 default:
15539 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
15540 DMA_RWCTRL_WRITE_BNDRY_1024);
15541 break;
855e1111 15542 }
59e6b434
DM
15543 }
15544
15545out:
15546 return val;
15547}
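
#if 0
/* Illustration only (not compiled): PCI_CACHE_LINE_SIZE is encoded in
 * 32-bit words, which is why the decode above multiplies by 4 to get
 * bytes; a raw value of 0x10 therefore means a 64-byte cache line.
 * Zero means "unprogrammed", and the driver falls back to the most
 * conservative 1024-byte assumption.
 */
static int pci_cacheline_bytes_sketch(struct pci_dev *pdev)
{
	u8 byte;

	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
	return byte ? (int) byte * 4 : 1024;
}
#endif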

static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
			   int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 * ...the DMA engine is connected to the GRC block and a DMA
	 * reset may affect the GRC block in some unpredictable way...
	 * The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
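
#if 0
/* Usage sketch (not compiled): a full round trip through the chip's
 * DMA engine is a host-to-device pass followed by a device-to-host
 * pass over the same buffer.  tg3_test_dma() below does exactly this
 * and then checks the buffer word by word.  The completion poll above
 * bounds each pass at roughly 40 * 100us = 4ms.
 */
static int tg3_dma_roundtrip_sketch(struct tg3 *tp, u32 *buf,
				    dma_addr_t buf_dma, int size)
{
	int ret;

	ret = tg3_do_test_dma(tp, buf, buf_dma, size, 1);	/* to device */
	if (ret)
		return ret;
	return tg3_do_test_dma(tp, buf, buf_dma, size, 0);	/* to host */
}
#endif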

#define TEST_BUFFER_SIZE	0x2000

static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};
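
/* Host bridges in the table above are known to expose the 5700/5701
 * write DMA bug without failing the round-trip test; tg3_test_dma()
 * below checks for them with pci_dev_present() and forces the
 * conservative 16-byte write boundary anyway.
 */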

static int tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);
#endif

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}

	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}

static void tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tg3_flag(tp, 57765_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
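
/* Note: the _jumbo watermarks above only take effect when the device
 * runs with an MTU above the standard Ethernet payload size;
 * tg3_reset_hw() picks between the two sets when programming the
 * buffer manager.
 */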

static char *tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400:	return "5400";
	case TG3_PHY_ID_BCM5401:	return "5401";
	case TG3_PHY_ID_BCM5411:	return "5411";
	case TG3_PHY_ID_BCM5701:	return "5701";
	case TG3_PHY_ID_BCM5703:	return "5703";
	case TG3_PHY_ID_BCM5704:	return "5704";
	case TG3_PHY_ID_BCM5705:	return "5705";
	case TG3_PHY_ID_BCM5750:	return "5750";
	case TG3_PHY_ID_BCM5752:	return "5752";
	case TG3_PHY_ID_BCM5714:	return "5714";
	case TG3_PHY_ID_BCM5780:	return "5780";
	case TG3_PHY_ID_BCM5755:	return "5755";
	case TG3_PHY_ID_BCM5787:	return "5787";
	case TG3_PHY_ID_BCM5784:	return "5784";
	case TG3_PHY_ID_BCM5756:	return "5722/5756";
	case TG3_PHY_ID_BCM5906:	return "5906";
	case TG3_PHY_ID_BCM5761:	return "5761";
	case TG3_PHY_ID_BCM5718C:	return "5718C";
	case TG3_PHY_ID_BCM5718S:	return "5718S";
	case TG3_PHY_ID_BCM57765:	return "57765";
	case TG3_PHY_ID_BCM5719C:	return "5719C";
	case TG3_PHY_ID_BCM5720C:	return "5720C";
	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
	case 0:				return "serdes";
	default:			return "unknown";
	}
}

static char *tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}
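
#if 0
/* Usage sketch (not compiled): the caller owns the scratch buffer.
 * tg3_init_one() below passes a 40-byte stack array, which comfortably
 * holds the longest string built here (e.g. "PCIX:133MHz:64-bit").
 * "dev" and "tp" stand in for the caller's own variables.
 */
{
	char str[40];

	netdev_info(dev, "bus: %s\n", tg3_bus_string(tp, str));
}
#endif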

static void tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}
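
/* These defaults are what the ETHTOOL_GCOALESCE path ("ethtool -c
 * <iface>") reports until userspace overrides them: the driver's
 * get_coalesce handler hands back this same structure.
 */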

static int tg3_init_one(struct pci_dev *pdev,
			const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find Power Management capability, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	err = pci_set_power_state(pdev, PCI_D0);
	if (err) {
		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
		goto err_out_free_res;
	}

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_power_down;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp, ent);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}
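
#if 0
	/* Equivalent modern idiom (sketch, not compiled): when the
	 * streaming and coherent masks agree, later kernels express the
	 * two pci_set_*_dma_mask() calls above as a single call:
	 */
	err = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
#endif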

	tg3_init_bufmgr_config(tp);

	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow
	 * INT-PHY loopback for the remaining devices.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shut it down
	 * cleanly.  The DMA self test below will enable WDMAC, and we
	 * would see (spurious) pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}
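
	/* A note on the mailbox arithmetic above: the interrupt mailbox
	 * stride drops from 8 bytes to 4 after the first five vectors,
	 * and the alternating -0x4/+0xc adjustment advances sndmbx by a
	 * net 8 bytes every two iterations while toggling which 32-bit
	 * half of a 64-bit send mailbox register is addressed.
	 */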

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (tg3_flag(tp, 5717_PLUS)) {
		/* Resume a low-power mode */
		tg3_frob_aux_power(tp, false);
	}

	tg3_timer_init(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tp->pci_chip_rev_id,
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_power_down:
	pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}

static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
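
/* Without CONFIG_PM_SLEEP, TG3_PM_OPS is NULL and the PCI core simply
 * skips system suspend/resume for this driver; see the .driver.pm
 * assignment in tg3_driver below.
 */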

/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure)
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return err;
}

/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);
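
	/* pci_restore_state() consumes the snapshot taken at probe time,
	 * so save the (now sane) config space again immediately; a later
	 * recovery pass can then restore it.
	 */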

	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	rtnl_unlock();

	return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	tg3_full_unlock(tp);
	if (err) {
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_phy_start(tp);

done:
	rtnl_unlock();
}

static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= TG3_PM_OPS,
};

static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);