]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blame - drivers/net/tg3.c
Merge branch 'bugzilla-10695' into release
[mirror_ubuntu-zesty-kernel.git] / drivers / net / tg3.c
CommitLineData
1da177e4
LT
1/*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
65610fba 7 * Copyright (C) 2005-2007 Broadcom Corporation.
1da177e4
LT
8 *
9 * Firmware is:
49cabf49
MC
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
12 *
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
1da177e4
LT
16 */
17
1da177e4
LT
18
19#include <linux/module.h>
20#include <linux/moduleparam.h>
21#include <linux/kernel.h>
22#include <linux/types.h>
23#include <linux/compiler.h>
24#include <linux/slab.h>
25#include <linux/delay.h>
14c85021 26#include <linux/in.h>
1da177e4
LT
27#include <linux/init.h>
28#include <linux/ioport.h>
29#include <linux/pci.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/ethtool.h>
34#include <linux/mii.h>
35#include <linux/if_vlan.h>
36#include <linux/ip.h>
37#include <linux/tcp.h>
38#include <linux/workqueue.h>
61487480 39#include <linux/prefetch.h>
f9a5f7d3 40#include <linux/dma-mapping.h>
1da177e4
LT
41
42#include <net/checksum.h>
c9bdd4b5 43#include <net/ip.h>
1da177e4
LT
44
45#include <asm/system.h>
46#include <asm/io.h>
47#include <asm/byteorder.h>
48#include <asm/uaccess.h>
49
49b6e95f 50#ifdef CONFIG_SPARC
1da177e4 51#include <asm/idprom.h>
49b6e95f 52#include <asm/prom.h>
1da177e4
LT
53#endif
54
55#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56#define TG3_VLAN_TAG_USED 1
57#else
58#define TG3_VLAN_TAG_USED 0
59#endif
60
1da177e4 61#define TG3_TSO_SUPPORT 1
1da177e4
LT
62
63#include "tg3.h"
64
65#define DRV_MODULE_NAME "tg3"
66#define PFX DRV_MODULE_NAME ": "
1b84d946
MC
67#define DRV_MODULE_VERSION "3.92.1"
68#define DRV_MODULE_RELDATE "June 9, 2008"
1da177e4
LT
69
70#define TG3_DEF_MAC_MODE 0
71#define TG3_DEF_RX_MODE 0
72#define TG3_DEF_TX_MODE 0
73#define TG3_DEF_MSG_ENABLE \
74 (NETIF_MSG_DRV | \
75 NETIF_MSG_PROBE | \
76 NETIF_MSG_LINK | \
77 NETIF_MSG_TIMER | \
78 NETIF_MSG_IFDOWN | \
79 NETIF_MSG_IFUP | \
80 NETIF_MSG_RX_ERR | \
81 NETIF_MSG_TX_ERR)
82
83/* length of time before we decide the hardware is borked,
84 * and dev->tx_timeout() should be called to fix the problem
85 */
86#define TG3_TX_TIMEOUT (5 * HZ)
87
88/* hardware minimum and maximum for a single frame's data payload */
89#define TG3_MIN_MTU 60
90#define TG3_MAX_MTU(tp) \
0f893dc6 91 ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
1da177e4
LT
92
93/* These numbers seem to be hard coded in the NIC firmware somehow.
94 * You can't change the ring sizes, but you can change where you place
95 * them in the NIC onboard memory.
96 */
97#define TG3_RX_RING_SIZE 512
98#define TG3_DEF_RX_RING_PENDING 200
99#define TG3_RX_JUMBO_RING_SIZE 256
100#define TG3_DEF_RX_JUMBO_RING_PENDING 100
101
102/* Do not place this n-ring entries value into the tp struct itself,
103 * we really want to expose these constants to GCC so that modulo et
104 * al. operations are done with shifts and masks instead of with
105 * hw multiply/modulo instructions. Another solution would be to
106 * replace things like '% foo' with '& (foo - 1)'.
107 */
108#define TG3_RX_RCB_RING_SIZE(tp) \
109 ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)
110
111#define TG3_TX_RING_SIZE 512
112#define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
113
114#define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
115 TG3_RX_RING_SIZE)
116#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
117 TG3_RX_JUMBO_RING_SIZE)
118#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
119 TG3_RX_RCB_RING_SIZE(tp))
120#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
121 TG3_TX_RING_SIZE)
1da177e4
LT
122#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
123
124#define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
125#define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
126
127/* minimum number of free TX descriptors required to wake up TX process */
42952231 128#define TG3_TX_WAKEUP_THRESH(tp) ((tp)->tx_pending / 4)
1da177e4
LT
129
130/* number of ETHTOOL_GSTATS u64's */
131#define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
132
4cafd3f5
MC
133#define TG3_NUM_TEST 6
134
1da177e4
LT
135static char version[] __devinitdata =
136 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
137
138MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
139MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
140MODULE_LICENSE("GPL");
141MODULE_VERSION(DRV_MODULE_VERSION);
142
143static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
144module_param(tg3_debug, int, 0);
145MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
146
147static struct pci_device_id tg3_pci_tbl[] = {
13185217
HK
148 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
149 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
150 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
151 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
152 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
153 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
154 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
155 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
156 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
157 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
158 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
159 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
160 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
161 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
162 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
163 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
164 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
165 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
166 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
167 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
168 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
169 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
170 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
171 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
126a3368 172 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
13185217
HK
173 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
174 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
175 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
176 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
177 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
178 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
179 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
180 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
181 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
182 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
183 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
184 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
185 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
186 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
126a3368 187 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
13185217
HK
188 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
189 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
190 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
676917d4 191 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
13185217
HK
192 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
193 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
194 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
195 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
196 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
197 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
198 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
b5d3772c
MC
199 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
200 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
d30cdd28
MC
201 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
202 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
6c7af27c 203 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
9936bcf6
MC
204 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
205 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
13185217
HK
206 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
207 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
208 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
209 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
210 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
211 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
212 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
213 {}
1da177e4
LT
214};
215
216MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
217
50da859d 218static const struct {
1da177e4
LT
219 const char string[ETH_GSTRING_LEN];
220} ethtool_stats_keys[TG3_NUM_STATS] = {
221 { "rx_octets" },
222 { "rx_fragments" },
223 { "rx_ucast_packets" },
224 { "rx_mcast_packets" },
225 { "rx_bcast_packets" },
226 { "rx_fcs_errors" },
227 { "rx_align_errors" },
228 { "rx_xon_pause_rcvd" },
229 { "rx_xoff_pause_rcvd" },
230 { "rx_mac_ctrl_rcvd" },
231 { "rx_xoff_entered" },
232 { "rx_frame_too_long_errors" },
233 { "rx_jabbers" },
234 { "rx_undersize_packets" },
235 { "rx_in_length_errors" },
236 { "rx_out_length_errors" },
237 { "rx_64_or_less_octet_packets" },
238 { "rx_65_to_127_octet_packets" },
239 { "rx_128_to_255_octet_packets" },
240 { "rx_256_to_511_octet_packets" },
241 { "rx_512_to_1023_octet_packets" },
242 { "rx_1024_to_1522_octet_packets" },
243 { "rx_1523_to_2047_octet_packets" },
244 { "rx_2048_to_4095_octet_packets" },
245 { "rx_4096_to_8191_octet_packets" },
246 { "rx_8192_to_9022_octet_packets" },
247
248 { "tx_octets" },
249 { "tx_collisions" },
250
251 { "tx_xon_sent" },
252 { "tx_xoff_sent" },
253 { "tx_flow_control" },
254 { "tx_mac_errors" },
255 { "tx_single_collisions" },
256 { "tx_mult_collisions" },
257 { "tx_deferred" },
258 { "tx_excessive_collisions" },
259 { "tx_late_collisions" },
260 { "tx_collide_2times" },
261 { "tx_collide_3times" },
262 { "tx_collide_4times" },
263 { "tx_collide_5times" },
264 { "tx_collide_6times" },
265 { "tx_collide_7times" },
266 { "tx_collide_8times" },
267 { "tx_collide_9times" },
268 { "tx_collide_10times" },
269 { "tx_collide_11times" },
270 { "tx_collide_12times" },
271 { "tx_collide_13times" },
272 { "tx_collide_14times" },
273 { "tx_collide_15times" },
274 { "tx_ucast_packets" },
275 { "tx_mcast_packets" },
276 { "tx_bcast_packets" },
277 { "tx_carrier_sense_errors" },
278 { "tx_discards" },
279 { "tx_errors" },
280
281 { "dma_writeq_full" },
282 { "dma_write_prioq_full" },
283 { "rxbds_empty" },
284 { "rx_discards" },
285 { "rx_errors" },
286 { "rx_threshold_hit" },
287
288 { "dma_readq_full" },
289 { "dma_read_prioq_full" },
290 { "tx_comp_queue_full" },
291
292 { "ring_set_send_prod_index" },
293 { "ring_status_update" },
294 { "nic_irqs" },
295 { "nic_avoided_irqs" },
296 { "nic_tx_threshold_hit" }
297};
298
50da859d 299static const struct {
4cafd3f5
MC
300 const char string[ETH_GSTRING_LEN];
301} ethtool_test_keys[TG3_NUM_TEST] = {
302 { "nvram test (online) " },
303 { "link test (online) " },
304 { "register test (offline)" },
305 { "memory test (offline)" },
306 { "loopback test (offline)" },
307 { "interrupt test (offline)" },
308};
309
/* Plain posted MMIO write to a chip register; no read-back flush. */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}
314
315static u32 tg3_read32(struct tg3 *tp, u32 off)
316{
6aa20a22 317 return (readl(tp->regs + off));
b401e9e2
MC
318}
319
/* MMIO write into the APE (Application Processing Engine) register window. */
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}
324
325static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
326{
327 return (readl(tp->aperegs + off));
328}
329
/* Write a chip register indirectly through PCI config space.  Used when
 * direct MMIO is unsafe on this chip.  indirect_lock serializes the
 * BASE_ADDR/DATA register pair against concurrent indirect accesses.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
339
/* MMIO write followed by a read-back to flush the posted write to the chip. */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
345
/* Read a chip register indirectly through the PCI config-space
 * BASE_ADDR/DATA window, under indirect_lock.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
357
/* Write a mailbox register via PCI config space.  Two mailboxes have
 * dedicated config-space aliases and bypass the indirect window; all
 * others go through BASE_ADDR/DATA at the mailbox offset (+0x5600).
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
387
/* Read a mailbox register via the PCI config-space indirect window
 * (mailbox space starts at offset 0x5600), under indirect_lock.
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
399
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods: tp->write32 already guarantees the
		 * write reached the chip, so no flush read is needed.
		 */
		tp->write32(tp, off, val);
	else {
		/* Posted method: flush with a read, delaying first if the
		 * register cannot be read back immediately.
		 */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
424
/* Mailbox write with an optional read-back flush.  The flush is skipped
 * when write reordering is enabled or the ICH workaround is active.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
432
/* TX mailbox write.  The TXD_MBOX_HWBUG workaround writes the value
 * twice; MBOX_WRITE_REORDER requires a read-back to order the write.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}
442
b5d3772c
MC
443static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
444{
445 return (readl(tp->regs + off + GRCMBOX_BASE));
446}
447
/* 5906 mailbox write into the GRC mailbox region. */
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
452
20094930 453#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
09ee929c 454#define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
20094930
MC
455#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
456#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
09ee929c 457#define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
20094930
MC
458
459#define tw32(reg,val) tp->write32(tp, reg, val)
b401e9e2
MC
460#define tw32_f(reg,val) _tw32_flush(tp,(reg),(val), 0)
461#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
20094930 462#define tr32(reg) tp->read32(tp, reg)
1da177e4
LT
463
/* Write a word into NIC on-board SRAM through the memory window,
 * either via PCI config space (SRAM_USE_CONFIG) or via MMIO.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	/* 5906 cannot access the stats block region of SRAM this way. */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
488
1da177e4
LT
489static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
490{
6892914f
MC
491 unsigned long flags;
492
b5d3772c
MC
493 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
494 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
495 *val = 0;
496 return;
497 }
498
6892914f 499 spin_lock_irqsave(&tp->indirect_lock, flags);
bbadf503
MC
500 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
501 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
502 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
1da177e4 503
bbadf503
MC
504 /* Always leave this as zero. */
505 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
506 } else {
507 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
508 *val = tr32(TG3PCI_MEM_WIN_DATA);
509
510 /* Always leave this as zero. */
511 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
512 }
6892914f 513 spin_unlock_irqrestore(&tp->indirect_lock, flags);
1da177e4
LT
514}
515
0d3031d9
MC
516static void tg3_ape_lock_init(struct tg3 *tp)
517{
518 int i;
519
520 /* Make sure the driver hasn't any stale locks. */
521 for (i = 0; i < 8; i++)
522 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
523 APE_LOCK_GRANT_DRIVER);
524}
525
526static int tg3_ape_lock(struct tg3 *tp, int locknum)
527{
528 int i, off;
529 int ret = 0;
530 u32 status;
531
532 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
533 return 0;
534
535 switch (locknum) {
536 case TG3_APE_LOCK_MEM:
537 break;
538 default:
539 return -EINVAL;
540 }
541
542 off = 4 * locknum;
543
544 tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
545
546 /* Wait for up to 1 millisecond to acquire lock. */
547 for (i = 0; i < 100; i++) {
548 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
549 if (status == APE_LOCK_GRANT_DRIVER)
550 break;
551 udelay(10);
552 }
553
554 if (status != APE_LOCK_GRANT_DRIVER) {
555 /* Revoke the lock request. */
556 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
557 APE_LOCK_GRANT_DRIVER);
558
559 ret = -EBUSY;
560 }
561
562 return ret;
563}
564
565static void tg3_ape_unlock(struct tg3 *tp, int locknum)
566{
567 int off;
568
569 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
570 return;
571
572 switch (locknum) {
573 case TG3_APE_LOCK_MEM:
574 break;
575 default:
576 return;
577 }
578
579 off = 4 * locknum;
580 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
581}
582
/* Mask PCI interrupts in MISC_HOST_CTRL and ack/disable via the
 * interrupt mailbox (writing 1 disables further interrupts).
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
589
/* Force an interrupt if the status block has pending work: via the GRC
 * SETINT bit in non-tagged mode, otherwise by kicking the coalescing
 * engine with HOSTCC_MODE_NOW.
 */
static inline void tg3_cond_int(struct tg3 *tp)
{
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
599
/* Re-enable chip interrupts: clear irq_sync, unmask PCI interrupts,
 * and write last_tag to the interrupt mailbox to ack completed work.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();		/* irq_sync must be visible before ints are unmasked */

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	/* 1-shot MSI mode requires a second mailbox write to re-arm. */
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	tg3_cond_int(tp);
}
614
04237ddd
MC
615static inline unsigned int tg3_has_work(struct tg3 *tp)
616{
617 struct tg3_hw_status *sblk = tp->hw_status;
618 unsigned int work_exists = 0;
619
620 /* check for phy events */
621 if (!(tp->tg3_flags &
622 (TG3_FLAG_USE_LINKCHG_REG |
623 TG3_FLAG_POLL_SERDES))) {
624 if (sblk->status & SD_STATUS_LINK_CHG)
625 work_exists = 1;
626 }
627 /* check for RX/TX work to do */
628 if (sblk->idx[0].tx_consumer != tp->tx_cons ||
629 sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
630 work_exists = 1;
631
632 return work_exists;
633}
634
/* tg3_restart_ints
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	mmiowb();	/* order the mailbox write before any later MMIO */

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
655
/* Quiesce the netdev: stop NAPI polling and disable the TX queue. */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	napi_disable(&tp->napi);
	netif_tx_disable(tp->dev);
}
662
/* Restart the netdev after tg3_netif_stop: wake TX, re-enable NAPI,
 * and re-enable chip interrupts.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	napi_enable(&tp->napi);
	/* Force SD_STATUS_UPDATED so tg3_cond_int raises an interrupt
	 * for any work that arrived while we were stopped.
	 */
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
674
/* Switch the chip core clock source.  Every clock-control write uses
 * tw32_wait_f with a 40 usec delay because the register cannot be read
 * back safely right after a frequency change.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	/* CPMU-equipped and 5780-class chips manage this themselves. */
	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	orig_clock_ctrl = clock_ctrl;
	/* Keep only the CLKRUN bits and the low divider field. */
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Step down in two stages: 44MHz+ALTCLK, then ALTCLK only. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
706
707#define PHY_BUSY_LOOPS 5000
708
/* Read PHY register @reg over the MII management interface.
 * Returns 0 with the value in *val, or -EBUSY if the MI interface
 * stays busy for PHY_BUSY_LOOPS polls.
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* Auto-polling would race with a manual MDIO transaction;
	 * pause it for the duration of the access.
	 */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll until the MI interface reports the transaction done. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
757
/* Write @val to PHY register @reg over the MII management interface.
 * Returns 0 on success or -EBUSY if the MI interface stays busy.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* 5906 silently ignores writes to these registers; report success. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	/* Pause auto-polling while we drive MDIO manually. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll until the MI interface reports the transaction done. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
806
/* Write @val to PHY DSP register @reg: select the address, then write
 * through the DSP read/write port.
 */
static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
}
812
/* Enable or disable automatic MDI crossover (auto-MDIX) in the PHY.
 * 5906 uses a shadowed EPHY test register; other 5705+ copper PHYs use
 * the AUX control misc shadow register.  No-op for serdes and pre-5705.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 ephy;

		/* Expose the shadow registers, flip the MDIX bit, then
		 * restore the original EPHY test register value.
		 */
		if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
			tg3_writephy(tp, MII_TG3_EPHY_TEST,
				     ephy | MII_TG3_EPHY_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
				if (enable)
					phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
				tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
			}
			tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
		}
	} else {
		/* Read-modify-write the AUX control misc shadow register;
		 * WREN must be set for the write-back to take effect.
		 */
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}
850
/* Enable the PHY "ethernet wirespeed" feature (bits 15 and 4 of the
 * AUX control register selected by 0x7007) unless disabled by flag.
 */
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}
863
864static int tg3_bmcr_reset(struct tg3 *tp)
865{
866 u32 phy_control;
867 int limit, err;
868
869 /* OK, reset it, and poll the BMCR_RESET bit until it
870 * clears or we time out.
871 */
872 phy_control = BMCR_RESET;
873 err = tg3_writephy(tp, MII_BMCR, phy_control);
874 if (err != 0)
875 return -EBUSY;
876
877 limit = 5000;
878 while (limit--) {
879 err = tg3_readphy(tp, MII_BMCR, &phy_control);
880 if (err != 0)
881 return -EBUSY;
882
883 if ((phy_control & BMCR_RESET) == 0) {
884 udelay(40);
885 break;
886 }
887 udelay(10);
888 }
889 if (limit <= 0)
890 return -EBUSY;
891
892 return 0;
893}
894
/* Program PHY DSP tuning values derived from the chip's OTP (one-time
 * programmable) word.  Each field of tp->phy_otp is masked, shifted,
 * and written to its DSP register; no-op when no OTP data was read.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* Enable SM_DSP clock and tx 6dB coding. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Turn off SM_DSP clock. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
}
937
1da177e4
LT
938static int tg3_wait_macro_done(struct tg3 *tp)
939{
940 int limit = 100;
941
942 while (limit--) {
943 u32 tmp32;
944
945 if (!tg3_readphy(tp, 0x16, &tmp32)) {
946 if ((tmp32 & 0x1000) == 0)
947 break;
948 }
949 }
950 if (limit <= 0)
951 return -EBUSY;
952
953 return 0;
954}
955
/* Write a fixed test pattern into each of the 4 PHY DSP channels and
 * verify it reads back correctly.  On any mismatch or macro timeout,
 * *resetp is set so the caller retries after another PHY reset.
 * Returns 0 on success, -EBUSY on failure.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select this channel's block and the write command. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		/* Trigger the macro and wait for it to finish. */
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the channel and switch to read-back mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read back pairs of (low, high) words and compare against
		 * the pattern, masking to the bits the DSP actually stores.
		 */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
1021
1022static int tg3_phy_reset_chanpat(struct tg3 *tp)
1023{
1024 int chan;
1025
1026 for (chan = 0; chan < 4; chan++) {
1027 int i;
1028
1029 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1030 (chan * 0x2000) | 0x0200);
1031 tg3_writephy(tp, 0x16, 0x0002);
1032 for (i = 0; i < 6; i++)
1033 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1034 tg3_writephy(tp, 0x16, 0x0202);
1035 if (tg3_wait_macro_done(tp))
1036 return -EBUSY;
1037 }
1038
1039 return 0;
1040}
1041
/* PHY reset workaround for 5703/5704/5705: reset the PHY, force
 * 1000/full master mode, and repeatedly write/verify a DSP test
 * pattern (up to 10 retries) before restoring the original state.
 *
 * Returns 0 on success or a negative errno on failure.
 *
 * NOTE(review): if tg3_readphy() fails on every iteration, reg32
 * and/or phy9_orig are never assigned, yet phy9_orig is written back
 * to MII_TG3_CTRL below and reg32 may be stale — reads of
 * uninitialized locals; confirm whether readphy can persistently fail
 * here.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt. */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode. */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB. */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		/* Write and verify the DSP test pattern; on failure the
		 * helper sets do_phy_reset so the next attempt starts
		 * with a fresh BMCR reset.
		 */
		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	/* Unblock PHY control access. */
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	}
	else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	/* Restore the 1000BASE-T control register saved above. */
	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	/* Re-enable transmitter and interrupt (clear the 0x3000 bits). */
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
1117
c8e1e82b
MC
1118static void tg3_link_report(struct tg3 *);
1119
1da177e4
LT
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 cpmuctrl;
	u32 phy_status;
	int err;

	/* 5906: take the internal ethernet PHY out of IDDQ (low power)
	 * before touching it.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* BMSR is read twice; link status is latched, so the second read
	 * reflects the current state.
	 */
	err = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	/* The reset will drop the link; report it down now. */
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	/* 5703/5704/5705 need the full DSP test-pattern reset dance. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	/* On 5784 (non-AX revs), temporarily clear the CPMU
	 * GPHY_10MB_RXONLY bit around the BMCR reset and restore it
	 * afterwards.
	 */
	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		u32 phy;

		phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
		u32 val;

		/* If the MAC clock is stuck at 12.5 MHz, clear the
		 * selection to let it revert.
		 */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}

		/* Disable GPHY autopowerdown. */
		tg3_writephy(tp, MII_TG3_MISC_SHDW,
			     MII_TG3_MISC_SHDW_WREN |
			     MII_TG3_MISC_SHDW_APD_SEL |
			     MII_TG3_MISC_SHDW_APD_WKTM_84MS);
	}

	tg3_phy_apply_otp(tp);

out:
	/* Chip-specific PHY errata workarounds, applied via DSP writes. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		/* The double write is intentional (present in the
		 * original workaround).
		 */
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
1265
/* Configure the GRC local control GPIOs to switch the NIC between Vmain
 * and Vaux, depending on whether this device (or its peer on dual-port
 * 5704/5714 boards) needs WOL or ASF to keep running.  Each tw32_wait_f
 * step settles for 100 us; the ordering of the GPIO writes is part of
 * the hardware sequence and must not be changed.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	/* Only real NICs (not LOM/management setups) are frobbed. */
	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
		return;

	/* On dual-port 5704/5714 the GPIOs are shared, so the peer
	 * function's WOL/ASF needs must be taken into account.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		/* Power must stay up: drive the GPIOs for Vaux. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
			/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
			u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
					     GRC_LCLCTRL_GPIO_OE1 |
					     GRC_LCLCTRL_GPIO_OE2 |
					     GRC_LCLCTRL_GPIO_OUTPUT0 |
					     GRC_LCLCTRL_GPIO_OUTPUT1 |
					     tp->grc_local_ctrl;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* If the peer is already initialized, it owns the
			 * GPIO sequence; don't redo it here.
			 */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				   NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					  GRC_LCLCTRL_GPIO_OE1 |
					  GRC_LCLCTRL_GPIO_OE2 |
					  GRC_LCLCTRL_GPIO_OUTPUT1 |
					  GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		/* Neither port needs aux power: release GPIO1. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
1376
e8f3f6ca
MC
1377static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
1378{
1379 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
1380 return 1;
1381 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
1382 if (speed != SPEED_10)
1383 return 1;
1384 } else if (speed == SPEED_10)
1385 return 1;
1386
1387 return 0;
1388}
1389
1da177e4
LT
1390static int tg3_setup_phy(struct tg3 *, int);
1391
1392#define RESET_KIND_SHUTDOWN 0
1393#define RESET_KIND_INIT 1
1394#define RESET_KIND_SUSPEND 2
1395
1396static void tg3_write_sig_post_reset(struct tg3 *, int);
1397static int tg3_halt_cpu(struct tg3 *, u32);
6921d201
MC
1398static int tg3_nvram_lock(struct tg3 *);
1399static void tg3_nvram_unlock(struct tg3 *);
1da177e4 1400
15c3b696
MC
/* Put the PHY (or SerDes block) into its lowest usable power state as
 * part of device power-down.  The exact mechanism is chip-specific, and
 * some chips must not have their PHY powered down at all because of
 * hardware bugs.
 */
static void tg3_power_down_phy(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		/* Fiber SerDes: only the 5704 needs explicit handling. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: reset the PHY, then park it in IDDQ mode. */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else {
		/* Force the link LED off before powering down. */
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
		return;

	if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
		/* Drop the 1000 Mbps MAC clock to 12.5 MHz before
		 * power-down on 5761/5784 AX parts.
		 */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
1448
/* Transition the device to the requested PCI power state.
 *
 * For PCI_D0 the chip is simply powered up and switched out of Vaux.
 * For D1/D2/D3hot the function saves the current link config, drops the
 * link to 10/half (copper), arms WOL if configured, gates the various
 * clocks, powers down the PHY when nothing needs it, frobs the aux
 * power GPIOs, and finally writes the new state to PCI_PM_CTRL.
 *
 * Returns 0 on success, -EINVAL for an unknown power state.
 * Caller context: device teardown/suspend paths.
 */
static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
{
	u32 misc_host_ctrl;
	u16 power_control, power_caps;
	int pm = tp->pm_cap;	/* offset of the PCI PM capability */

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	pci_read_config_word(tp->pdev,
			     pm + PCI_PM_CTRL,
			     &power_control);
	/* Clear PME status (write-1-to-clear) and the old state bits. */
	power_control |= PCI_PM_CTRL_PME_STATUS;
	power_control &= ~(PCI_PM_CTRL_STATE_MASK);
	switch (state) {
	case PCI_D0:
		power_control |= 0;	/* D0 state encoding is 0 */
		pci_write_config_word(tp->pdev,
				      pm + PCI_PM_CTRL,
				      power_control);
		udelay(100);	/* Delay after power state change */

		/* Switch out of Vaux if it is a NIC */
		if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

		return 0;

	case PCI_D1:
		power_control |= 1;
		break;

	case PCI_D2:
		power_control |= 2;
		break;

	case PCI_D3hot:
		power_control |= 3;
		break;

	default:
		printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
		       "requested.\n",
		       tp->dev->name, state);
		return -EINVAL;
	};

	power_control |= PCI_PM_CTRL_PME_ENABLE;

	/* Mask PCI interrupts while the chip is being put to sleep. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	/* Save the current link settings so resume can restore them. */
	if (tp->link_config.phy_is_low_power == 0) {
		tp->link_config.phy_is_low_power = 1;
		tp->link_config.orig_speed = tp->link_config.speed;
		tp->link_config.orig_duplex = tp->link_config.duplex;
		tp->link_config.orig_autoneg = tp->link_config.autoneg;
	}

	/* Copper only: renegotiate down to 10/half for low power. */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		tp->link_config.speed = SPEED_10;
		tp->link_config.duplex = DUPLEX_HALF;
		tp->link_config.autoneg = AUTONEG_ENABLE;
		tg3_setup_phy(tp, 0);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		int i;
		u32 val;

		/* Wait up to ~200 ms for firmware to signal ready via the
		 * ASF status mailbox; timeout is tolerated silently.
		 */
		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);

	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
		u32 mac_mode;

		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
			udelay(40);

			if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			/* Keep the current polarity, except on 5700 where
			 * it must be recomputed for the WOL link speed.
			 */
			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = (tp->tg3_flags &
					     TG3_FLAG_WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
		     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
			mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		/* Leave the receiver on so magic packets can be seen. */
		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Clock gating: which clocks can be stopped depends on chip
	 * family and whether WOL at 100 Mbps is required.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
		   (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
		   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
		/* do nothing */
	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Two-step write sequence; each settles for 40 us. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* Power the PHY down only when no WOL/ASF/APE needs it alive. */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		tg3_power_down_phy(tp);

	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
			int err;

			/* Halt the RX CPU under the NVRAM lock; only
			 * unlock if the lock was actually acquired.
			 */
			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	/* Finally, set the new power state. */
	pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
	udelay(100);	/* Delay after power state change */

	return 0;
}
1673
7c5026aa
MC
1674/* tp->lock is held. */
1675static void tg3_wait_for_event_ack(struct tg3 *tp)
1676{
1677 int i;
1678
1679 /* Wait for up to 2.5 milliseconds */
1680 for (i = 0; i < 250000; i++) {
1681 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1682 break;
1683 udelay(10);
1684 }
1685}
1686
/* tp->lock is held. */
/* Push a link-status snapshot (BMCR/BMSR, advertisement/LPA, 1000BASE-T
 * control/status, PHY address) to the management firmware through the
 * NIC SRAM mailbox, then raise the driver-event bit.  Only applies to
 * 5780-class chips with ASF enabled; failed PHY reads leave the
 * corresponding half-word as 0.
 */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		return;

	/* Make sure firmware has consumed any previous event. */
	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	/* Payload length: 14 bytes follow in the data mailbox. */
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	/* Word 0: BMCR in the high half, BMSR in the low half. */
	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	/* Word 1: local advertisement / link partner ability. */
	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	/* Word 2: 1000BASE-T control/status (copper only). */
	val = 0;
	if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	/* Word 3: PHY address register in the high half. */
	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	/* Signal the firmware that a new event is pending. */
	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);
}
1736
1da177e4
LT
/* Log the current link state (down, or up with speed/duplex and the
 * resolved TX/RX flow-control settings) when link messages are enabled,
 * and forward the new state to the management firmware.
 */
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		if (netif_msg_link(tp))
			printk(KERN_INFO PFX "%s: Link is down.\n",
			       tp->dev->name);
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
		       tp->dev->name,
		       (tp->link_config.active_speed == SPEED_1000 ?
			1000 :
			(tp->link_config.active_speed == SPEED_100 ?
			 100 : 10)),
		       (tp->link_config.active_duplex == DUPLEX_FULL ?
			"full" : "half"));

		printk(KERN_INFO PFX
		       "%s: Flow control is %s for TX and %s for RX.\n",
		       tp->dev->name,
		       (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
		       "on" : "off",
		       (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
		       "on" : "off");
		/* NOTE(review): the firmware is only told about link-up
		 * when netif_msg_link() is enabled — looks unintended;
		 * confirm against later upstream versions.
		 */
		tg3_ump_link_report(tp);
	}
}
1764
ba4d07a8
MC
1765static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1766{
1767 u16 miireg;
1768
1769 if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1770 miireg = ADVERTISE_PAUSE_CAP;
1771 else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1772 miireg = ADVERTISE_PAUSE_ASYM;
1773 else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1774 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1775 else
1776 miireg = 0;
1777
1778 return miireg;
1779}
1780
1781static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1782{
1783 u16 miireg;
1784
1785 if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1786 miireg = ADVERTISE_1000XPAUSE;
1787 else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1788 miireg = ADVERTISE_1000XPSE_ASYM;
1789 else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1790 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1791 else
1792 miireg = 0;
1793
1794 return miireg;
1795}
1796
95937268
MC
1797static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
1798{
1799 u8 cap = 0;
1800
1801 if (lcladv & ADVERTISE_PAUSE_CAP) {
1802 if (lcladv & ADVERTISE_PAUSE_ASYM) {
1803 if (rmtadv & LPA_PAUSE_CAP)
1804 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1805 else if (rmtadv & LPA_PAUSE_ASYM)
1806 cap = TG3_FLOW_CTRL_RX;
1807 } else {
1808 if (rmtadv & LPA_PAUSE_CAP)
1809 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1810 }
1811 } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
1812 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
1813 cap = TG3_FLOW_CTRL_TX;
1814 }
1815
1816 return cap;
1817}
1818
1819static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1820{
1821 u8 cap = 0;
1822
1823 if (lcladv & ADVERTISE_1000XPAUSE) {
1824 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1825 if (rmtadv & LPA_1000XPAUSE)
1826 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1827 else if (rmtadv & LPA_1000XPAUSE_ASYM)
1828 cap = TG3_FLOW_CTRL_RX;
1829 } else {
1830 if (rmtadv & LPA_1000XPAUSE)
1831 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1832 }
1833 } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1834 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1835 cap = TG3_FLOW_CTRL_TX;
1836 }
1837
1838 return cap;
1839}
1840
1da177e4
LT
/* Apply the negotiated (or forced) flow-control settings to the MAC.
 *
 * When pause autonegotiation is enabled, the active setting is resolved
 * from the local/remote advertisements (serdes vs. copper variants);
 * otherwise the user-configured flowctrl is used directly.  The RX/TX
 * MAC mode registers are only rewritten when a bit actually changed.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
{
	u8 new_tg3_flags = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
			new_tg3_flags = tg3_resolve_flowctrl_1000X(local_adv,
								   remote_adv);
		else
			new_tg3_flags = tg3_resolve_flowctrl_1000T(local_adv,
								   remote_adv);
	} else {
		/* Forced mode: take the configured setting verbatim. */
		new_tg3_flags = tp->link_config.flowctrl;
	}

	tp->link_config.active_flowctrl = new_tg3_flags;

	if (new_tg3_flags & TG3_FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	/* Avoid a register write (and flush) if nothing changed. */
	if (old_rx_mode != tp->rx_mode) {
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	if (new_tg3_flags & TG3_FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode) {
		tw32_f(MAC_TX_MODE, tp->tx_mode);
	}
}
1879
1880static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1881{
1882 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1883 case MII_TG3_AUX_STAT_10HALF:
1884 *speed = SPEED_10;
1885 *duplex = DUPLEX_HALF;
1886 break;
1887
1888 case MII_TG3_AUX_STAT_10FULL:
1889 *speed = SPEED_10;
1890 *duplex = DUPLEX_FULL;
1891 break;
1892
1893 case MII_TG3_AUX_STAT_100HALF:
1894 *speed = SPEED_100;
1895 *duplex = DUPLEX_HALF;
1896 break;
1897
1898 case MII_TG3_AUX_STAT_100FULL:
1899 *speed = SPEED_100;
1900 *duplex = DUPLEX_FULL;
1901 break;
1902
1903 case MII_TG3_AUX_STAT_1000HALF:
1904 *speed = SPEED_1000;
1905 *duplex = DUPLEX_HALF;
1906 break;
1907
1908 case MII_TG3_AUX_STAT_1000FULL:
1909 *speed = SPEED_1000;
1910 *duplex = DUPLEX_FULL;
1911 break;
1912
1913 default:
715116a1
MC
1914 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1915 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1916 SPEED_10;
1917 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1918 DUPLEX_HALF;
1919 break;
1920 }
1da177e4
LT
1921 *speed = SPEED_INVALID;
1922 *duplex = DUPLEX_INVALID;
1923 break;
1924 };
1925}
1926
/* Program the copper PHY's advertisement registers (and BMCR for forced
 * modes) according to tp->link_config, then start autonegotiation or
 * force the configured speed/duplex.  Handles three cases: low-power
 * mode, full autoneg (speed == SPEED_INVALID), and a specific
 * user-requested link mode.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode. Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* Full autonegotiation: advertise everything configured. */
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		new_adv = ADVERTISE_CSMA;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;

		new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			/* 5701 A0/B0 parts negotiate gigabit as master. */
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
		new_adv |= ADVERTISE_CSMA;

		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
		} else {
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			/* No gigabit advertisement for 10/100 modes. */
			new_adv = 0;
		}

		tg3_writephy(tp, MII_TG3_CTRL, new_adv);
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		};

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Force the link down via loopback and wait (up
			 * to ~15 ms) for link status to drop before
			 * applying the new forced mode.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
2064
2065static int tg3_init_5401phy_dsp(struct tg3 *tp)
2066{
2067 int err;
2068
2069 /* Turn off tap power management. */
2070 /* Set Extended packet length bit */
2071 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2072
2073 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2074 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2075
2076 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2077 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2078
2079 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2080 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2081
2082 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2083 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2084
2085 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2086 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2087
2088 udelay(40);
2089
2090 return err;
2091}
2092
3600d918 2093static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
1da177e4 2094{
3600d918
MC
2095 u32 adv_reg, all_mask = 0;
2096
2097 if (mask & ADVERTISED_10baseT_Half)
2098 all_mask |= ADVERTISE_10HALF;
2099 if (mask & ADVERTISED_10baseT_Full)
2100 all_mask |= ADVERTISE_10FULL;
2101 if (mask & ADVERTISED_100baseT_Half)
2102 all_mask |= ADVERTISE_100HALF;
2103 if (mask & ADVERTISED_100baseT_Full)
2104 all_mask |= ADVERTISE_100FULL;
1da177e4
LT
2105
2106 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2107 return 0;
2108
1da177e4
LT
2109 if ((adv_reg & all_mask) != all_mask)
2110 return 0;
2111 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2112 u32 tg3_ctrl;
2113
3600d918
MC
2114 all_mask = 0;
2115 if (mask & ADVERTISED_1000baseT_Half)
2116 all_mask |= ADVERTISE_1000HALF;
2117 if (mask & ADVERTISED_1000baseT_Full)
2118 all_mask |= ADVERTISE_1000FULL;
2119
1da177e4
LT
2120 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2121 return 0;
2122
1da177e4
LT
2123 if ((tg3_ctrl & all_mask) != all_mask)
2124 return 0;
2125 }
2126 return 1;
2127}
2128
ef167e27
MC
2129static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2130{
2131 u32 curadv, reqadv;
2132
2133 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2134 return 1;
2135
2136 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2137 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2138
2139 if (tp->link_config.active_duplex == DUPLEX_FULL) {
2140 if (curadv != reqadv)
2141 return 0;
2142
2143 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2144 tg3_readphy(tp, MII_LPA, rmtadv);
2145 } else {
2146 /* Reprogram the advertisement register, even if it
2147 * does not affect the current link. If the link
2148 * gets renegotiated in the future, we can save an
2149 * additional renegotiation cycle by advertising
2150 * it correctly in the first place.
2151 */
2152 if (curadv != reqadv) {
2153 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2154 ADVERTISE_PAUSE_ASYM);
2155 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
2156 }
2157 }
2158
2159 return 1;
2160}
2161
1da177e4
LT
/* Bring up or re-evaluate the link on a copper PHY and program the MAC
 * to match the resulting speed, duplex and flow-control settings.
 * @force_reset forces a PHY reset before link evaluation.
 * Returns 0 on success, or the error from tg3_phy_reset() /
 * tg3_init_5401phy_dsp().
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	/* Mask MAC events and ack any latched status change bits. */
	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	/* Temporarily disable MI auto-polling (re-enabled below only for
	 * the BCM5411/Altima workaround).
	 */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* BMSR latches link-down; read twice for current state. */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			/* Reload the 5401 DSP coefficients and wait up to
			 * ~10ms for the link to come back.
			 */
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 rev B0 at gigabit: retry once after a full
			 * PHY reset if the link still did not come up.
			 */
			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		/* If bit 10 was clear, set it and renegotiate from scratch. */
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	/* Poll BMSR (double read to flush the latched value) for link. */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait for a non-zero aux status, then decode it into
		 * speed/duplex.
		 */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Wait until BMCR reads back something sane (not 0 and not
		 * all-ones in the low 15 bits).
		 */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			/* Link counts only if autoneg is on and every
			 * requested mode plus flow control is advertised.
			 */
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_copper_is_advertising_all(tp,
						tp->link_config.advertising)) {
				if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
							      &rmt_adv))
					current_link_up = 1;
			}
		} else {
			/* Forced mode: link counts only if the PHY matches
			 * the forced speed/duplex/flow-control exactly.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex &&
			    tp->link_config.flowctrl ==
			    tp->link_config.active_flowctrl) {
				current_link_up = 1;
			}
		}

		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL)
			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	}

relink:
	if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
		u32 tmp;

		/* Restart the negotiation and re-check link status. */
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	/* Program the MAC port mode to match the negotiated speed. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* 5700 at gigabit on PCI-X / fast PCI: notify firmware via the
	 * NIC SRAM mailbox after a settle delay.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Propagate any link state transition to the net core. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
2420
/* Software state for the fiber (1000BASE-X style) autonegotiation state
 * machine driven by tg3_fiber_aneg_smachine().
 */
struct tg3_fiber_aneginfo {
	int state;			/* current ANEG_STATE_* value */
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;			/* MR_* control and result bits */
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	/* Timestamps counted in state-machine invocations (ticks). */
	unsigned long link_time, cur_time;

	u32 ability_match_cfg;		/* last stable received config word */
	int ability_match_count;	/* consecutive identical words seen */

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;		/* transmitted/received config words */
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06

};
/* Return codes of tg3_fiber_aneg_smachine(). */
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

/* Ticks a state must remain stable before the machine advances. */
#define ANEG_STATE_SETTLE_TIME	10000
2484
/* Advance the software fiber autoneg state machine by one tick.
 *
 * Samples the received config word from the MAC, updates the
 * ability/ack/idle match tracking in @ap, then dispatches on the
 * current state.  Returns ANEG_OK to keep running, ANEG_TIMER_ENAB
 * while a settle timer is armed, ANEG_DONE on completion, or
 * ANEG_FAILED on error.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	/* First entry: clear all tracking state. */
	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		/* Two consecutive identical config words -> ability match. */
		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* No config words on the wire: the link is idling. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch(ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Start sending an all-zero config word. */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		} else {
			ret = ANEG_TIMER_ENAB;
		}
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Advertise full duplex plus the configured pause bits. */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0) {
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		}
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Echo the partner's ability with our ACK bit set. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Decode the link partner's config word into MR_LP_* flags. */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		/* 0x0008: magic bit in the rx config word — presumably the
		 * partner's toggle bit; kept as-is (TODO confirm).
		 */
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					/* Next page requested but not
					 * supported here.
					 */
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop sending config words and wait for idle. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	};

	return ret;
}
2738
5be73b47 2739static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
1da177e4
LT
2740{
2741 int res = 0;
2742 struct tg3_fiber_aneginfo aninfo;
2743 int status = ANEG_FAILED;
2744 unsigned int tick;
2745 u32 tmp;
2746
2747 tw32_f(MAC_TX_AUTO_NEG, 0);
2748
2749 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2750 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2751 udelay(40);
2752
2753 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2754 udelay(40);
2755
2756 memset(&aninfo, 0, sizeof(aninfo));
2757 aninfo.flags |= MR_AN_ENABLE;
2758 aninfo.state = ANEG_STATE_UNKNOWN;
2759 aninfo.cur_time = 0;
2760 tick = 0;
2761 while (++tick < 195000) {
2762 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2763 if (status == ANEG_DONE || status == ANEG_FAILED)
2764 break;
2765
2766 udelay(1);
2767 }
2768
2769 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2770 tw32_f(MAC_MODE, tp->mac_mode);
2771 udelay(40);
2772
5be73b47
MC
2773 *txflags = aninfo.txconfig;
2774 *rxflags = aninfo.flags;
1da177e4
LT
2775
2776 if (status == ANEG_DONE &&
2777 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2778 MR_LP_ADV_FULL_DUPLEX)))
2779 res = 1;
2780
2781 return res;
2782}
2783
/* One-time bring-up sequence for the BCM8002 SerDes PHY.  The register
 * numbers and values are opaque vendor magic; only touch with care.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
2833
/* Fiber link setup using the on-chip SG_DIG hardware autoneg block.
 * Programs SG_DIG_CTRL to match the requested mode and pause
 * advertisement, tracks autoneg completion and parallel detection,
 * and returns 1 if the link is up, 0 otherwise.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	/* All revs other than 5704 A0/A1 need the serdes config
	 * workaround writes below.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: turn hardware autoneg off if it was on.
		 * 0xc010000 / 0x4010000 are port-dependent serdes magic.
		 */
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation. */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* A link previously found by parallel detection stays up
		 * (counting down serdes_counter) as long as PCS sync holds
		 * and no config words arrive.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		/* Pulse soft reset while loading the new control value. */
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			/* Autoneg finished: derive the pause configuration
			 * from what we and the partner advertised.
			 */
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				/* Autoneg timed out: disable hardware
				 * autoneg and try parallel detection.
				 */
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->tg3_flags2 |=
						TG3_FLG2_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No sync and no signal: rearm the autoneg timeout. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
2975
2976static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2977{
2978 int current_link_up = 0;
2979
5cf64b8a 2980 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
1da177e4 2981 goto out;
1da177e4
LT
2982
2983 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5be73b47 2984 u32 txflags, rxflags;
1da177e4 2985 int i;
6aa20a22 2986
5be73b47
MC
2987 if (fiber_autoneg(tp, &txflags, &rxflags)) {
2988 u32 local_adv = 0, remote_adv = 0;
1da177e4 2989
5be73b47
MC
2990 if (txflags & ANEG_CFG_PS1)
2991 local_adv |= ADVERTISE_1000XPAUSE;
2992 if (txflags & ANEG_CFG_PS2)
2993 local_adv |= ADVERTISE_1000XPSE_ASYM;
2994
2995 if (rxflags & MR_LP_ADV_SYM_PAUSE)
2996 remote_adv |= LPA_1000XPAUSE;
2997 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
2998 remote_adv |= LPA_1000XPAUSE_ASYM;
1da177e4
LT
2999
3000 tg3_setup_flow_control(tp, local_adv, remote_adv);
3001
1da177e4
LT
3002 current_link_up = 1;
3003 }
3004 for (i = 0; i < 30; i++) {
3005 udelay(20);
3006 tw32_f(MAC_STATUS,
3007 (MAC_STATUS_SYNC_CHANGED |
3008 MAC_STATUS_CFG_CHANGED));
3009 udelay(40);
3010 if ((tr32(MAC_STATUS) &
3011 (MAC_STATUS_SYNC_CHANGED |
3012 MAC_STATUS_CFG_CHANGED)) == 0)
3013 break;
3014 }
3015
3016 mac_status = tr32(MAC_STATUS);
3017 if (current_link_up == 0 &&
3018 (mac_status & MAC_STATUS_PCS_SYNCED) &&
3019 !(mac_status & MAC_STATUS_RCVD_CFG))
3020 current_link_up = 1;
3021 } else {
5be73b47
MC
3022 tg3_setup_flow_control(tp, 0, 0);
3023
1da177e4
LT
3024 /* Forcing 1000FD link up. */
3025 current_link_up = 1;
1da177e4
LT
3026
3027 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3028 udelay(40);
e8f3f6ca
MC
3029
3030 tw32_f(MAC_MODE, tp->mac_mode);
3031 udelay(40);
1da177e4
LT
3032 }
3033
3034out:
3035 return current_link_up;
3036}
3037
/* Link setup entry point for TBI/fiber ports.  Short-circuits when a
 * previously established link still looks healthy, otherwise reruns
 * autoneg (hardware SG_DIG or software) and updates MAC mode, LEDs
 * and the netdev carrier state.  Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Remember the current link parameters so we can report changes. */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: carrier is up, init is complete and the MAC shows a
	 * clean synced link — just ack the change bits and return.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Switch the MAC to TBI port mode for the fiber interface. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling. */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear the link-change bit in the status block. */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack status changes until they stop re-asserting. */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		/* Autoneg timed out with no sync: pulse SEND_CONFIGS to
		 * provoke the partner into renegotiating.
		 */
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* Fiber links are always 1000FD when up; drive the LEDs to match. */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Report carrier transitions, or parameter changes on a link that
	 * stayed up/down.
	 */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
3145
/* Link setup for 1000BASE-X serdes devices reached through an MII
 * register interface (e.g. 5714S).  Mirrors the copper path but uses
 * the 1000BASE-X advertisement bits.  Returns the OR of all phy
 * read/write error results (0 on success).
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack any pending link/MI events. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* BMSR latches link-down; read twice to get the current state. */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		/* On 5714 trust the MAC's TX status for the link bit. */
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		/* Build the 1000BASE-X advertisement word from scratch. */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			/* Advertisement changed (or AN was off): restart
			 * autoneg; the poll timer checks for completion.
			 */
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

			return err;
		}
	} else {
		/* Forced speed/duplex mode. */
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
						       BMCR_ANRESTART |
						       BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Re-sample the (latched) link status. */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			/* Resolve duplex from the common advertisement. */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;
			} else
				current_link_up = 0;
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
3315
/* Poll-timer helper implementing 1000BASE-X "parallel detection" for
 * MII serdes devices: if autoneg never completes but we see signal
 * detect with no config code words, force the link up at 1000/full;
 * when config code words reappear, re-enable autoneg.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			/* Read twice -- first read clears latched bits. */
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	} else if (netif_carrier_ok(tp->dev) &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}
}
3373
/* Top-level PHY/link setup dispatcher.  Selects the fiber, fiber-MII
 * or copper setup path, then applies chip-wide fixups that depend on
 * the resulting link state: clock prescaler on 5784 A0/A1, TX IPG
 * timings, statistics coalescing, and the ASPM power threshold.
 * Returns the error code from the chosen setup path.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	int err;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		err = tg3_setup_fiber_phy(tp, force_reset);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	} else {
		err = tg3_setup_copper_phy(tp, force_reset);
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
		u32 val, scale;

		/* Rescale the GRC prescaler to match the MAC clock rate. */
		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	/* 1000/half requires a longer slot time than the other modes. */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		/* Coalesce statistics only while the link is up. */
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
		u32 val = tr32(PCIE_PWR_MGMT_THRESH);
		/* Adjust the PCIe L1 threshold depending on carrier state. */
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
3436
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* Should only fire when the write-reorder workaround is not
	 * already active, i.e. direct mailbox writes are in use.
	 */
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}
3457
1b2a7205
MC
3458static inline u32 tg3_tx_avail(struct tg3 *tp)
3459{
3460 smp_mb();
3461 return (tp->tx_pending -
3462 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3463}
3464
/* Tigon3 never reports partial packet sends. So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 *
 * Reclaims completed TX descriptors between the software consumer
 * index and the hardware's, unmapping DMA buffers and freeing skbs.
 * A NULL skb or a producer/consumer inconsistency is treated as an
 * MMIO-reorder symptom and handed to tg3_tx_recover().
 */
static void tg3_tx(struct tg3 *tp)
{
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* Each fragment occupies one additional descriptor. */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(tp->dev) &&
		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
		/* Re-check under the TX lock to avoid racing the xmit path. */
		netif_tx_lock(tp->dev);
		if (netif_queue_stopped(tp->dev) &&
		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
			netif_wake_queue(tp->dev);
		netif_tx_unlock(tp->dev);
	}
}
3532
3533/* Returns size of skb allocated or < 0 on error.
3534 *
3535 * We only need to fill in the address because the other members
3536 * of the RX descriptor are invariant, see tg3_init_rings.
3537 *
3538 * Note the purposeful assymetry of cpu vs. chip accesses. For
3539 * posting buffers we only dirty the first cache line of the RX
3540 * descriptor (containing the address). Whereas for the RX status
3541 * buffers the cpu only reads the last cacheline of the RX descriptor
3542 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3543 */
3544static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3545 int src_idx, u32 dest_idx_unmasked)
3546{
3547 struct tg3_rx_buffer_desc *desc;
3548 struct ring_info *map, *src_map;
3549 struct sk_buff *skb;
3550 dma_addr_t mapping;
3551 int skb_size, dest_idx;
3552
3553 src_map = NULL;
3554 switch (opaque_key) {
3555 case RXD_OPAQUE_RING_STD:
3556 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3557 desc = &tp->rx_std[dest_idx];
3558 map = &tp->rx_std_buffers[dest_idx];
3559 if (src_idx >= 0)
3560 src_map = &tp->rx_std_buffers[src_idx];
7e72aad4 3561 skb_size = tp->rx_pkt_buf_sz;
1da177e4
LT
3562 break;
3563
3564 case RXD_OPAQUE_RING_JUMBO:
3565 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3566 desc = &tp->rx_jumbo[dest_idx];
3567 map = &tp->rx_jumbo_buffers[dest_idx];
3568 if (src_idx >= 0)
3569 src_map = &tp->rx_jumbo_buffers[src_idx];
3570 skb_size = RX_JUMBO_PKT_BUF_SZ;
3571 break;
3572
3573 default:
3574 return -EINVAL;
3575 };
3576
3577 /* Do not overwrite any of the map or rp information
3578 * until we are sure we can commit to a new buffer.
3579 *
3580 * Callers depend upon this behavior and assume that
3581 * we leave everything unchanged if we fail.
3582 */
a20e9c62 3583 skb = netdev_alloc_skb(tp->dev, skb_size);
1da177e4
LT
3584 if (skb == NULL)
3585 return -ENOMEM;
3586
1da177e4
LT
3587 skb_reserve(skb, tp->rx_offset);
3588
3589 mapping = pci_map_single(tp->pdev, skb->data,
3590 skb_size - tp->rx_offset,
3591 PCI_DMA_FROMDEVICE);
3592
3593 map->skb = skb;
3594 pci_unmap_addr_set(map, mapping, mapping);
3595
3596 if (src_map != NULL)
3597 src_map->skb = NULL;
3598
3599 desc->addr_hi = ((u64)mapping >> 32);
3600 desc->addr_lo = ((u64)mapping & 0xffffffff);
3601
3602 return skb_size;
3603}
3604
3605/* We only need to move over in the address because the other
3606 * members of the RX descriptor are invariant. See notes above
3607 * tg3_alloc_rx_skb for full details.
3608 */
3609static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3610 int src_idx, u32 dest_idx_unmasked)
3611{
3612 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3613 struct ring_info *src_map, *dest_map;
3614 int dest_idx;
3615
3616 switch (opaque_key) {
3617 case RXD_OPAQUE_RING_STD:
3618 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3619 dest_desc = &tp->rx_std[dest_idx];
3620 dest_map = &tp->rx_std_buffers[dest_idx];
3621 src_desc = &tp->rx_std[src_idx];
3622 src_map = &tp->rx_std_buffers[src_idx];
3623 break;
3624
3625 case RXD_OPAQUE_RING_JUMBO:
3626 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3627 dest_desc = &tp->rx_jumbo[dest_idx];
3628 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3629 src_desc = &tp->rx_jumbo[src_idx];
3630 src_map = &tp->rx_jumbo_buffers[src_idx];
3631 break;
3632
3633 default:
3634 return;
3635 };
3636
3637 dest_map->skb = src_map->skb;
3638 pci_unmap_addr_set(dest_map, mapping,
3639 pci_unmap_addr(src_map, mapping));
3640 dest_desc->addr_hi = src_desc->addr_hi;
3641 dest_desc->addr_lo = src_desc->addr_lo;
3642
3643 src_map->skb = NULL;
3644}
3645
#if TG3_VLAN_TAG_USED
/* Hand a VLAN-tagged frame to the stack via the VLAN accel path. */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3652
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 *
 * tg3_rx() consumes up to @budget entries from the status ring,
 * delivering packets to the stack, and returns the number received.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask, rx_std_posted = 0;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie tells us which producer ring (and
		 * slot in it) this status entry refers to.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		} else {
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		if (len > RX_COPY_THRESHOLD
		    && tp->rx_offset == 2
		    /* rx_offset != 2 iff this is a 5701 card running
		     * in PCI-X mode [see tg3_get_invariants()] */
		    ) {
			int skb_size;

			/* Large frame: hand the existing buffer up and
			 * post a freshly allocated replacement.
			 */
			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			/* Small frame: copy into a new skb and recycle
			 * the original ring buffer in place.
			 */
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically flush the std producer index so the chip
		 * never starves for buffers mid-loop.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			u32 idx = *post_ptr % TG3_RX_RING_SIZE;

			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
				     TG3_64BIT_REG_LOW, idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	mmiowb();

	return received;
}
3832
/* One pass of NAPI work: handle link-change events, reclaim completed
 * TX descriptors, then receive packets within the remaining budget.
 * Returns the updated work_done count; returns early if tg3_tx()
 * flagged TX recovery.
 */
static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
{
	struct tg3_hw_status *sblk = tp->hw_status;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		tg3_tx(tp);
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_done += tg3_rx(tp, budget - work_done);

	return work_done;
}
3866
/* NAPI poll callback.  Loops over tg3_poll_work() until the budget is
 * exhausted or no work remains, then completes NAPI and re-enables
 * interrupts.  A pending TX recovery aborts polling and schedules the
 * chip-reset task instead.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3 *tp = container_of(napi, struct tg3, napi);
	int work_done = 0;
	struct tg3_hw_status *sblk = tp->hw_status;

	while (1) {
		work_done = tg3_poll_work(tp, work_done, budget);

		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
			/* tp->last_tag is used in tg3_restart_ints() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tp->last_tag = sblk->status_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tp))) {
			netif_rx_complete(tp->dev, napi);
			tg3_restart_ints(tp);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	netif_rx_complete(tp->dev, napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
3907
/* Flag the IRQ handler to do no further work and wait for any handler
 * already running to finish.  Whoever sets irq_sync is expected to
 * clear it again later (see tg3_restart_hw()).
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();	/* make irq_sync visible before waiting on the IRQ */

	synchronize_irq(tp->pdev->irq);
}
3917
/* Nonzero while interrupts are quiesced; see tg3_irq_quiesce(). */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
3922
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
3934
/* Release the lock taken by tg3_full_lock(). */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
3939
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the caches for the status block and next RX descriptor. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);

	/* Skip scheduling NAPI while interrupts are being quiesced. */
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_HANDLED;
}
3956
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev, &tp->napi);

	return IRQ_RETVAL(1);
}
3981
/* Shared/legacy INTx interrupt handler (untagged status blocks).
 * Confirms the interrupt is ours, masks further irqs via the mailbox,
 * and kicks NAPI if there is work.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tp))) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		netif_rx_schedule(dev, &tp->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
4030
/* INTx interrupt handler for chips using tagged status blocks: new
 * work is detected by a status_tag differing from the last one seen.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tp->last_tag)) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	if (netif_rx_schedule_prep(dev, &tp->napi)) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		/* Update last_tag to mark that this status has been
		 * seen. Because interrupt may be shared, we may be
		 * racing with tg3_poll(), so only update last_tag
		 * if tg3_poll() is not scheduled.
		 */
		tp->last_tag = sblk->status_tag;
		__netif_rx_schedule(dev, &tp->napi);
	}
out:
	return IRQ_RETVAL(handled);
}
4078
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;

	/* Claim the interrupt only if the status block updated or INTA
	 * is asserted for us; disable ints so the test fires once.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
4093
8e7a22e3 4094static int tg3_init_hw(struct tg3 *, int);
944d980e 4095static int tg3_halt(struct tg3 *, int, int);
1da177e4 4096
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * On failure the device is halted and closed; dev_close() can sleep,
 * so the full lock is dropped around the teardown and reacquired
 * before returning (hence the sparse __releases/__acquires below).
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
		       "aborting.\n", tp->dev->name);
		/* Shut the chip down cleanly before closing the netdev. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		napi_enable(&tp->napi);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
4120
1da177e4
LT
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: drive RX/TX processing (e.g. for netconsole) by
 * invoking the regular interrupt handler directly.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev);
}
#endif
4129
/* Deferred full-chip reset, scheduled from tg3_tx_timeout() and other
 * error paths.  Runs in process context from the shared workqueue.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);

	/* The device may have been closed between scheduling and now. */
	if (!netif_running(tp->dev)) {
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	/* tg3_netif_stop() may sleep, so it is called unlocked. */
	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	/* Latch and clear the restart-timer request before resetting. */
	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		/* Recovering from a TX hang: fall back to flushed
		 * (non-posted) mailbox writes.
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	if (tg3_init_hw(tp, 1))
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tg3_full_unlock(tp);
}
4170
/* Dump a short set of MAC/DMA status registers to the log for
 * post-mortem debugging of TX timeouts.
 */
static void tg3_dump_short_state(struct tg3 *tp)
{
	printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
	printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
}
4178
/* net_device watchdog callback: the TX queue stalled. Log some state
 * (if tx_err messages are enabled) and schedule a full chip reset.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
		       dev->name);
		tg3_dump_short_state(tp);
	}

	/* The actual reset must run in process context. */
	schedule_work(&tp->reset_task);
}
4191
c58ec932
MC
4192/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
4193static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4194{
4195 u32 base = (u32) mapping & 0xffffffff;
4196
4197 return ((base > 0xffffdcc0) &&
4198 (base + len + 8 < base));
4199}
4200
72f2afb8
MC
4201/* Test for DMA addresses > 40-bit */
4202static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
4203 int len)
4204{
4205#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6728a8e2 4206 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
72f2afb8
MC
4207 return (((u64) mapping + len) > DMA_40BIT_MASK);
4208 return 0;
4209#else
4210 return 0;
4211#endif
4212}
4213
1da177e4
LT
4214static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4215
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Linearizes @skb into a freshly-allocated copy whose DMA mapping does
 * not trip the 4G-boundary bug, replaces the already-queued descriptors
 * between *@start and @last_plus_one with a single descriptor for the
 * copy, and unmaps/releases the original skb.  Returns 0 on success,
 * -1 on allocation or mapping failure (caller drops the packet).
 */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
				       u32 last_plus_one, u32 *start,
				       u32 base_flags, u32 mss)
{
	struct sk_buff *new_skb;
	dma_addr_t new_addr = 0;
	u32 entry = *start;
	int i, ret = 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		/* 5701 also needs the data 4-byte aligned. */
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		entry = *start;
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure new skb does not cross any 4G boundaries.
		 * Drop the packet if it does.
		 */
		if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
			ret = -1;
			dev_kfree_skb(new_skb);
			new_skb = NULL;
		} else {
			tg3_set_txd(tp, entry, new_addr, new_skb->len,
				    base_flags, 1 | (mss << 1));
			*start = NEXT_TX(entry);
		}
	}

	/* Now clean up the sw ring entries: unmap every descriptor the
	 * original skb occupied (head first, then each page fragment).
	 */
	i = 0;
	while (entry != last_plus_one) {
		int len;

		if (i == 0)
			len = skb_headlen(skb);
		else
			len = skb_shinfo(skb)->frags[i-1].size;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
				 len, PCI_DMA_TODEVICE);
		if (i == 0) {
			/* First slot now owns the replacement skb. */
			tp->tx_buffers[entry].skb = new_skb;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
		} else {
			tp->tx_buffers[entry].skb = NULL;
		}
		entry = NEXT_TX(entry);
		i++;
	}

	dev_kfree_skb(skb);

	return ret;
}
4283
4284static void tg3_set_txd(struct tg3 *tp, int entry,
4285 dma_addr_t mapping, int len, u32 flags,
4286 u32 mss_and_is_end)
4287{
4288 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4289 int is_end = (mss_and_is_end & 0x1);
4290 u32 mss = (mss_and_is_end >> 1);
4291 u32 vlan_tag = 0;
4292
4293 if (is_end)
4294 flags |= TXD_FLAG_END;
4295 if (flags & TXD_FLAG_VLAN) {
4296 vlan_tag = flags >> 16;
4297 flags &= 0xffff;
4298 }
4299 vlan_tag |= (mss << TXD_MSS_SHIFT);
4300
4301 txd->addr_hi = ((u64) mapping >> 32);
4302 txd->addr_lo = ((u64) mapping & 0xffffffff);
4303 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4304 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4305}
4306
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		int tcp_opt_len, ip_tcp_len;

		/* Headers must be writable for the checksum fixups below. */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		/* Encode the L3+L4 header length into the upper MSS bits
		 * as the hardware TSO engine expects.
		 */
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
		else {
			struct iphdr *iph = ip_hdr(skb);

			tcp_opt_len = tcp_optlen(skb);
			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			mss |= (ip_tcp_len + tcp_opt_len) << 9;
		}

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		/* Hardware computes the TCP checksum for TSO packets. */
		tcp_hdr(skb)->check = 0;

	}
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			tg3_set_txd(tp, entry, mapping, len,
				    base_flags, (i == last) | (mss << 1));

			entry = NEXT_TX(entry);
		}
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		netif_stop_queue(dev);
		/* Re-check after stopping to close the race with reclaim. */
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4425
static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);

/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 *
 * Segments @skb in software and re-submits each resulting packet
 * through tg3_start_xmit_dma_bug().  Always consumes @skb unless it
 * returns NETDEV_TX_BUSY.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
		netif_stop_queue(tp->dev);
		/* Re-check after stopping; wake again if reclaim freed room. */
		if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	/* Segment with TSO masked off so GSO does the split in software. */
	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit_dma_bug(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
52c0fd83 4460
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 */
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;
	int would_hit_hwbug;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		struct iphdr *iph;
		int tcp_opt_len, ip_tcp_len, hdr_len;

		/* Headers must be writable for the checksum fixups below. */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		tcp_opt_len = tcp_optlen(skb);
		ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

		/* Headers over 80 bytes trip a TSO bug on some chips;
		 * fall back to software GSO for those packets.
		 */
		hdr_len = ip_tcp_len + tcp_opt_len;
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
			     (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
			return (tg3_tso_bug(tp, skb));

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		iph = ip_hdr(skb);
		iph->check = 0;
		iph->tot_len = htons(mss + hdr_len);
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			/* Firmware TSO wants the pseudo-header checksum
			 * pre-seeded in the TCP header.
			 */
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		/* Encode the IP/TCP option lengths where this chip
		 * generation expects them (mss bits vs. base_flags bits).
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	/* Track whether any mapping trips a DMA erratum; if so the whole
	 * packet is re-queued via the workaround path below.
	 */
	if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
		would_hit_hwbug = 1;
	else if (tg3_4g_overflow_test(mapping, len))
		would_hit_hwbug = 1;

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			if (tg3_4g_overflow_test(mapping, len))
				would_hit_hwbug = 1;

			if (tg3_40bit_overflow_test(tp, mapping, len))
				would_hit_hwbug = 1;

			if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last)|(mss << 1));
			else
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last));

			entry = NEXT_TX(entry);
		}
	}

	if (would_hit_hwbug) {
		u32 last_plus_one = entry;
		u32 start;

		/* Rewind to the first descriptor this packet used. */
		start = entry - 1 - skb_shinfo(skb)->nr_frags;
		start &= (TG3_TX_RING_SIZE - 1);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
						&start, base_flags, mss))
			goto out_unlock;

		entry = start;
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		netif_stop_queue(dev);
		/* Re-check after stopping to close the race with reclaim. */
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4636
4637static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4638 int new_mtu)
4639{
4640 dev->mtu = new_mtu;
4641
ef7f5ec0 4642 if (new_mtu > ETH_DATA_LEN) {
a4e2b347 4643 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
ef7f5ec0
MC
4644 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4645 ethtool_op_set_tso(dev, 0);
4646 }
4647 else
4648 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4649 } else {
a4e2b347 4650 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
ef7f5ec0 4651 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
0f893dc6 4652 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
ef7f5ec0 4653 }
1da177e4
LT
4654}
4655
/* net_device MTU-change callback.  If the interface is up, the chip
 * must be halted and reinitialized so the RX rings match the new MTU.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	/* Quiesce NAPI/TX before taking the full lock and resetting. */
	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	err = tg3_restart_hw(tp, 0);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	return err;
}
4689
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	struct ring_info *rxp;
	int i;

	/* Standard RX ring: unmap and free every posted buffer. */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		rxp = &tp->rx_std_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 tp->rx_pkt_buf_sz - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* Jumbo RX ring. */
	for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
		rxp = &tp->rx_jumbo_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* TX ring: each packet occupies one slot for the head plus one
	 * per page fragment, so advance i across all of them.
	 */
	for (i = 0; i < TG3_TX_RING_SIZE; ) {
		struct tx_ring_info *txp;
		struct sk_buff *skb;
		int j;

		txp = &tp->tx_buffers[i];
		skb = txp->skb;

		if (skb == NULL) {
			i++;
			continue;
		}

		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(txp, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);
		txp->skb = NULL;

		i++;

		for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
			txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(txp, mapping),
				       skb_shinfo(skb)->frags[j].size,
				       PCI_DMA_TODEVICE);
			i++;
		}

		dev_kfree_skb_any(skb);
	}
}
4761
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success, -ENOMEM if not even one RX buffer could be
 * allocated for a required ring.  May shrink rx_pending /
 * rx_jumbo_pending if only part of the requested buffers fit.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	u32 i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	/* Zero out all descriptors. */
	memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
	memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

	/* 5780-class chips use larger std-ring buffers instead of a
	 * jumbo ring when the MTU exceeds the standard Ethernet size.
	 */
	tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
	    (tp->dev->mtu > ETH_DATA_LEN))
		tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tp->rx_std[i];
		rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
			<< RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
			struct tg3_rx_buffer_desc *rxd;

			rxd = &tp->rx_jumbo[i];
			rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
				<< RXD_LEN_SHIFT;
			rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				RXD_FLAG_JUMBO;
			rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
		}
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
			printk(KERN_WARNING PFX
			       "%s: Using a smaller RX standard ring, "
			       "only %d out of %d buffers were allocated "
			       "successfully.\n",
			       tp->dev->name, i, tp->rx_pending);
			if (i == 0)
				return -ENOMEM;
			/* Run with however many buffers we did get. */
			tp->rx_pending = i;
			break;
		}
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < tp->rx_jumbo_pending; i++) {
			if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
					     -1, i) < 0) {
				printk(KERN_WARNING PFX
				       "%s: Using a smaller RX jumbo ring, "
				       "only %d out of %d buffers were "
				       "allocated successfully.\n",
				       tp->dev->name, i, tp->rx_jumbo_pending);
				if (i == 0) {
					tg3_free_rings(tp);
					return -ENOMEM;
				}
				tp->rx_jumbo_pending = i;
				break;
			}
		}
	}
	return 0;
}
4851
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 *
 * Releases the ring-info array and every DMA-coherent region allocated
 * by tg3_alloc_consistent(); safe to call on a partially-allocated
 * state (each pointer is checked and NULLed).
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	/* rx_std_buffers is one kzalloc block that also backs
	 * rx_jumbo_buffers and tx_buffers.
	 */
	kfree(tp->rx_std_buffers);
	tp->rx_std_buffers = NULL;
	if (tp->rx_std) {
		pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
				    tp->rx_std, tp->rx_std_mapping);
		tp->rx_std = NULL;
	}
	if (tp->rx_jumbo) {
		pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
				    tp->rx_jumbo, tp->rx_jumbo_mapping);
		tp->rx_jumbo = NULL;
	}
	if (tp->rx_rcb) {
		pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
				    tp->rx_rcb, tp->rx_rcb_mapping);
		tp->rx_rcb = NULL;
	}
	if (tp->tx_ring) {
		pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
				    tp->tx_ring, tp->tx_desc_mapping);
		tp->tx_ring = NULL;
	}
	if (tp->hw_status) {
		pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
				    tp->hw_status, tp->status_mapping);
		tp->hw_status = NULL;
	}
	if (tp->hw_stats) {
		pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
				    tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
4891
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 *
 * Allocates the software ring-info arrays (one kzalloc block) and all
 * DMA-coherent rings/status/stats blocks.  Returns 0 on success or
 * -ENOMEM after freeing anything partially allocated.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	/* Single allocation backs rx_std_buffers, rx_jumbo_buffers and
	 * tx_buffers; the latter two are carved out below.
	 */
	tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
				      (TG3_RX_RING_SIZE +
				       TG3_RX_JUMBO_RING_SIZE)) +
				     (sizeof(struct tx_ring_info) *
				      TG3_TX_RING_SIZE),
				     GFP_KERNEL);
	if (!tp->rx_std_buffers)
		return -ENOMEM;

	tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
	tp->tx_buffers = (struct tx_ring_info *)
		&tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];

	tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
					  &tp->rx_std_mapping);
	if (!tp->rx_std)
		goto err_out;

	tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
					    &tp->rx_jumbo_mapping);

	if (!tp->rx_jumbo)
		goto err_out;

	tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
					  &tp->rx_rcb_mapping);
	if (!tp->rx_rcb)
		goto err_out;

	tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
					   &tp->tx_desc_mapping);
	if (!tp->tx_ring)
		goto err_out;

	tp->hw_status = pci_alloc_consistent(tp->pdev,
					     TG3_HW_STATUS_SIZE,
					     &tp->status_mapping);
	if (!tp->hw_status)
		goto err_out;

	tp->hw_stats = pci_alloc_consistent(tp->pdev,
					    sizeof(struct tg3_hw_stats),
					    &tp->stats_mapping);
	if (!tp->hw_stats)
		goto err_out;

	memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
4953
4954#define MAX_WAIT_CNT 1000
4955
4956/* To stop a block, clear the enable bit and poll till it
4957 * clears. tp->lock is held.
4958 */
b3b7d6be 4959static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
1da177e4
LT
4960{
4961 unsigned int i;
4962 u32 val;
4963
4964 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4965 switch (ofs) {
4966 case RCVLSC_MODE:
4967 case DMAC_MODE:
4968 case MBFREE_MODE:
4969 case BUFMGR_MODE:
4970 case MEMARB_MODE:
4971 /* We can't enable/disable these bits of the
4972 * 5705/5750, just say success.
4973 */
4974 return 0;
4975
4976 default:
4977 break;
4978 };
4979 }
4980
4981 val = tr32(ofs);
4982 val &= ~enable_bit;
4983 tw32_f(ofs, val);
4984
4985 for (i = 0; i < MAX_WAIT_CNT; i++) {
4986 udelay(100);
4987 val = tr32(ofs);
4988 if ((val & enable_bit) == 0)
4989 break;
4990 }
4991
b3b7d6be 4992 if (i == MAX_WAIT_CNT && !silent) {
1da177e4
LT
4993 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4994 "ofs=%lx enable_bit=%x\n",
4995 ofs, enable_bit);
4996 return -ENODEV;
4997 }
4998
4999 return 0;
5000}
5001
/* tp->lock is held.
 *
 * Stop all RX/TX/DMA engine blocks in the order required by the
 * hardware (receive path first, then send path, then host coalescing,
 * DMA and buffer management), and clear the status/stats blocks.
 * Returns 0, or an accumulated nonzero error if any block failed to
 * stop (errors are OR-ed, never masked by later successes).
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Receive-side blocks. */
	err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Send-side blocks. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
		       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
		       tp->dev->name, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse the flow-through queue reset. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	if (tp->hw_status)
		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
5064
5065/* tp->lock is held. */
5066static int tg3_nvram_lock(struct tg3 *tp)
5067{
5068 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5069 int i;
5070
ec41c7df
MC
5071 if (tp->nvram_lock_cnt == 0) {
5072 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
5073 for (i = 0; i < 8000; i++) {
5074 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
5075 break;
5076 udelay(20);
5077 }
5078 if (i == 8000) {
5079 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5080 return -ENODEV;
5081 }
1da177e4 5082 }
ec41c7df 5083 tp->nvram_lock_cnt++;
1da177e4
LT
5084 }
5085 return 0;
5086}
5087
5088/* tp->lock is held. */
5089static void tg3_nvram_unlock(struct tg3 *tp)
5090{
ec41c7df
MC
5091 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5092 if (tp->nvram_lock_cnt > 0)
5093 tp->nvram_lock_cnt--;
5094 if (tp->nvram_lock_cnt == 0)
5095 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5096 }
1da177e4
LT
5097}
5098
e6af301b
MC
5099/* tp->lock is held. */
5100static void tg3_enable_nvram_access(struct tg3 *tp)
5101{
5102 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5103 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5104 u32 nvaccess = tr32(NVRAM_ACCESS);
5105
5106 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5107 }
5108}
5109
5110/* tp->lock is held. */
5111static void tg3_disable_nvram_access(struct tg3 *tp)
5112{
5113 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5114 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5115 u32 nvaccess = tr32(NVRAM_ACCESS);
5116
5117 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
5118 }
5119}
5120
/* Post @event to the APE management firmware's event mailbox.
 * Silently does nothing if the APE firmware segment is absent or not
 * ready.  Waits up to ~1ms for the previous event to drain before
 * forcing delivery with an APE doorbell interrupt.
 */
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (apedata != APE_FW_STATUS_READY)
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		/* Write the event only while no previous one is pending;
		 * the status register is checked under the APE memory lock.
		 */
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	/* Ring the APE doorbell so the firmware notices the new event. */
	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
5156
5157static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5158{
5159 u32 event;
5160 u32 apedata;
5161
5162 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5163 return;
5164
5165 switch (kind) {
5166 case RESET_KIND_INIT:
5167 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5168 APE_HOST_SEG_SIG_MAGIC);
5169 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5170 APE_HOST_SEG_LEN_MAGIC);
5171 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5172 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5173 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5174 APE_HOST_DRIVER_ID_MAGIC);
5175 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5176 APE_HOST_BEHAV_NO_PHYLOCK);
5177
5178 event = APE_EVENT_STATUS_STATE_START;
5179 break;
5180 case RESET_KIND_SHUTDOWN:
5181 event = APE_EVENT_STATUS_STATE_UNLOAD;
5182 break;
5183 case RESET_KIND_SUSPEND:
5184 event = APE_EVENT_STATUS_STATE_SUSPEND;
5185 break;
5186 default:
5187 return;
5188 }
5189
5190 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5191
5192 tg3_ape_send_event(tp, event);
5193}
5194
1da177e4
LT
5195/* tp->lock is held. */
5196static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5197{
f49639e6
DM
5198 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5199 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1da177e4
LT
5200
5201 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5202 switch (kind) {
5203 case RESET_KIND_INIT:
5204 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5205 DRV_STATE_START);
5206 break;
5207
5208 case RESET_KIND_SHUTDOWN:
5209 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5210 DRV_STATE_UNLOAD);
5211 break;
5212
5213 case RESET_KIND_SUSPEND:
5214 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5215 DRV_STATE_SUSPEND);
5216 break;
5217
5218 default:
5219 break;
5220 };
5221 }
0d3031d9
MC
5222
5223 if (kind == RESET_KIND_INIT ||
5224 kind == RESET_KIND_SUSPEND)
5225 tg3_ape_driver_state_change(tp, kind);
1da177e4
LT
5226}
5227
5228/* tp->lock is held. */
5229static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5230{
5231 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5232 switch (kind) {
5233 case RESET_KIND_INIT:
5234 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5235 DRV_STATE_START_DONE);
5236 break;
5237
5238 case RESET_KIND_SHUTDOWN:
5239 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5240 DRV_STATE_UNLOAD_DONE);
5241 break;
5242
5243 default:
5244 break;
5245 };
5246 }
0d3031d9
MC
5247
5248 if (kind == RESET_KIND_SHUTDOWN)
5249 tg3_ape_driver_state_change(tp, kind);
1da177e4
LT
5250}
5251
5252/* tp->lock is held. */
5253static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5254{
5255 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5256 switch (kind) {
5257 case RESET_KIND_INIT:
5258 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5259 DRV_STATE_START);
5260 break;
5261
5262 case RESET_KIND_SHUTDOWN:
5263 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5264 DRV_STATE_UNLOAD);
5265 break;
5266
5267 case RESET_KIND_SUSPEND:
5268 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5269 DRV_STATE_SUSPEND);
5270 break;
5271
5272 default:
5273 break;
5274 };
5275 }
5276}
5277
/* Poll for bootcode/firmware completion after a chip reset.
 * Returns 0 on success (including chips fitted with no firmware at
 * all), or -ENODEV when the 5906 VCPU never signals init-done.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete: bootcode posts
	 * the complemented magic back into the firmware mailbox.
	 */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 &&
	    !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
		tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;

		printk(KERN_INFO PFX "%s: No firmware running.\n",
		       tp->dev->name);
	}

	return 0;
}
5316
/* Save PCI command register before chip reset; the GRC_MISC_CFG core
 * clock reset can clear the memory-enable bit in it on some chips
 * (restored later by tg3_restore_pci_state()).
 */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
5322
/* Restore PCI state after chip reset.  Note the ordering here is
 * significant: indirect access must be re-enabled before any other
 * config writes, and the saved PCI_COMMAND word is restored after
 * the PCISTATE setup.
 */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	/* PCIe parts get their read-request size reset; legacy parts
	 * need cacheline size and latency timer restored.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
		pcie_set_readrq(tp->pdev, 4096);
	else {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tp->pcix_cap) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
5384
static void tg3_stop_fw(struct tg3 *);

/* tp->lock is held.
 * Perform a full GRC core-clock chip reset and bring the chip back to
 * a minimally sane state (memory arbiter, GRC mode, MAC mode), then
 * wait for bootcode and re-probe the ASF configuration.  Returns 0 on
 * success or a negative errno from tg3_poll_fw().
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int err;

	tg3_nvram_lock(tp);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
	if (tp->hw_status) {
		tp->hw_status->status = 0;
		tp->hw_status->status_tag = 0;
	}
	tp->last_tag = 0;
	smp_mb();
	synchronize_irq(tp->pdev->irq);

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		if (tr32(0x7e2c) == 0x60) {
			tw32(0x7e2c, 0x20);
		}
		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (i = 0; i < 5000; i++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}
		/* Set PCIE max payload size and clear error status.  */
		pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
	}

	tg3_restore_pci_state(tp);

	tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;

	/* Re-enable the memory arbiter (preserving extra mode bits on
	 * 5780-class chips).
	 */
	val = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Reprogram the MAC port mode for the PHY type in use. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else
		tw32_f(MAC_MODE, 0);
	udelay(40);

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	/* Reprobe ASF enable state.  */
	tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
	tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
	}

	return 0;
}
5575
/* tp->lock is held.
 * Ask the ASF firmware running on the RX CPU to pause itself.  A no-op
 * when ASF is disabled or when the APE (which has its own handshake)
 * manages the device.
 */
static void tg3_stop_fw(struct tg3 *tp)
{
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
		u32 val;

		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		/* Post the pause command, then raise the driver-event
		 * bit to interrupt the RX CPU.
		 */
		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
		val = tr32(GRC_RX_CPU_EVENT);
		val |= GRC_RX_CPU_DRIVER_EVENT;
		tw32(GRC_RX_CPU_EVENT, val);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
5595
/* tp->lock is held.
 * Bring the chip down: pause firmware, write the pre-reset signatures,
 * abort the hardware, reset the chip, and write the post-reset
 * signatures.  Returns the status of the chip reset.
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);
	tg3_write_sig_pre_reset(tp, kind);
	tg3_abort_hw(tp, silent);

	err = tg3_chip_reset(tp);

	/* The signatures are written even when the reset failed. */
	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	return err;
}
5616
/* Layout of the built-in 5701 A0 bug-fix firmware image below
 * (MIPS code downloaded into the RX CPU scratch RAM).
 * NOTE(review): "RELASE" in TG3_FW_RELASE_MINOR is a historical typo;
 * the macro name is kept as-is in case other code references it.
 */
#define TG3_FW_RELEASE_MAJOR	0x0
#define TG3_FW_RELASE_MINOR	0x0
#define TG3_FW_RELEASE_FIX	0x0
#define TG3_FW_START_ADDR	0x08000000
#define TG3_FW_TEXT_ADDR	0x08000000
#define TG3_FW_TEXT_LEN	0x9c0
#define TG3_FW_RODATA_ADDR	0x080009c0
#define TG3_FW_RODATA_LEN	0x60
#define TG3_FW_DATA_ADDR	0x08000a40
#define TG3_FW_DATA_LEN	0x20
#define TG3_FW_SBSS_ADDR	0x08000a60
#define TG3_FW_SBSS_LEN	0xc
#define TG3_FW_BSS_ADDR	0x08000a70
#define TG3_FW_BSS_LEN	0x10
5631
50da859d 5632static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
1da177e4
LT
5633 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
5634 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
5635 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
5636 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
5637 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
5638 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
5639 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
5640 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
5641 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
5642 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
5643 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
5644 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
5645 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
5646 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
5647 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
5648 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5649 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
5650 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
5651 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
5652 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5653 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
5654 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
5655 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5656 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5657 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5658 0, 0, 0, 0, 0, 0,
5659 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
5660 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5661 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5662 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5663 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
5664 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
5665 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
5666 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
5667 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5668 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5669 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
5670 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5671 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5672 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5673 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
5674 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
5675 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
5676 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
5677 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
5678 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
5679 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
5680 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
5681 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
5682 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
5683 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
5684 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
5685 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
5686 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
5687 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
5688 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
5689 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
5690 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
5691 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
5692 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
5693 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
5694 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
5695 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
5696 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
5697 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
5698 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
5699 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
5700 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
5701 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
5702 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
5703 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
5704 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
5705 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
5706 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
5707 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
5708 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
5709 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
5710 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
5711 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
5712 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
5713 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
5714 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
5715 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
5716 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
5717 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
5718 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
5719 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
5720 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
5721 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
5722 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
5723 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
5724};
5725
50da859d 5726static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
1da177e4
LT
5727 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
5728 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
5729 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5730 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
5731 0x00000000
5732};
5733
#if 0 /* All zeros, don't eat up space with it. */
u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000
};
#endif

/* On-chip scratch RAM regions used to hold downloaded RX/TX CPU
 * firmware (see tg3_load_firmware_cpu()).
 */
#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000
5745
/* tp->lock is held.
 * Halt the embedded RX or TX CPU (offset is RX_CPU_BASE or
 * TX_CPU_BASE).  Returns 0 on success, -ENODEV if the CPU refuses to
 * halt within the polling budget.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
	int i;

	/* 5705+ chips have no separate TX CPU; asking to halt it is a
	 * driver bug.
	 */
	BUG_ON(offset == TX_CPU_BASE &&
	    (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));

	/* 5906 uses a single VCPU; halt it through GRC_VCPU_EXT_CTRL. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (offset == RX_CPU_BASE) {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}

		/* RX CPU gets one extra flushed halt plus settle time. */
		tw32(offset + CPU_STATE, 0xffffffff);
		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
		udelay(10);
	} else {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}

	if (i >= 10000) {
		printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
		       "and %s CPU\n",
		       tp->dev->name,
		       (offset == RX_CPU_BASE ? "RX" : "TX"));
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
5793
/* Describes one firmware image (text/rodata/data sections) to be
 * downloaded into a chip CPU's scratch RAM by
 * tg3_load_firmware_cpu().  A NULL *_data pointer means the section
 * is all zeros.
 */
struct fw_info {
	unsigned int text_base;		/* link address of .text */
	unsigned int text_len;		/* section length in bytes */
	const u32 *text_data;
	unsigned int rodata_base;	/* link address of .rodata */
	unsigned int rodata_len;
	const u32 *rodata_data;
	unsigned int data_base;		/* link address of .data */
	unsigned int data_len;
	const u32 *data_data;
};
5805
/* tp->lock is held.
 * Halt the target CPU, zero its scratch RAM, and copy the firmware
 * sections described by @info into it at their link offsets.  The CPU
 * is left halted; the caller is responsible for starting it.  Returns
 * 0 on success or a negative errno.
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
				 int cpu_scratch_size, struct fw_info *info)
{
	int err, lock_err, i;
	void (*write_op)(struct tg3 *, u32, u32);

	if (cpu_base == TX_CPU_BASE &&
	    (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
		       "TX cpu firmware on %s which is 5705.\n",
		       tp->dev->name);
		return -EINVAL;
	}

	/* 5705+ must write scratch RAM through the memory window;
	 * older chips use indirect register writes.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	/* It is possible that bootcode is still loading at this point.
	 * Get the nvram lock first before halting the cpu.
	 */
	lock_err = tg3_nvram_lock(tp);
	err = tg3_halt_cpu(tp, cpu_base);
	if (!lock_err)
		tg3_nvram_unlock(tp);
	if (err)
		goto out;

	/* Zero the whole scratch area, then write each section.  A
	 * NULL section pointer writes zeros for that section.
	 */
	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
		write_op(tp, cpu_scratch_base + i, 0);
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
	for (i = 0; i < (info->text_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->text_base & 0xffff) +
			      (i * sizeof(u32))),
			 (info->text_data ?
			  info->text_data[i] : 0));
	for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->rodata_base & 0xffff) +
			      (i * sizeof(u32))),
			 (info->rodata_data ?
			  info->rodata_data[i] : 0));
	for (i = 0; i < (info->data_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->data_base & 0xffff) +
			      (i * sizeof(u32))),
			 (info->data_data ?
			  info->data_data[i] : 0));

	err = 0;

out:
	return err;
}
5864
/* tp->lock is held.
 * Download the built-in 5701 A0 bug-fix firmware into both the RX and
 * TX CPU scratch areas, then start only the RX CPU and verify its
 * program counter latched the firmware entry point.  Returns 0 on
 * success or a negative errno.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
	int err, i;

	info.text_base = TG3_FW_TEXT_ADDR;
	info.text_len = TG3_FW_TEXT_LEN;
	info.text_data = &tg3FwText[0];
	info.rodata_base = TG3_FW_RODATA_ADDR;
	info.rodata_len = TG3_FW_RODATA_LEN;
	info.rodata_data = &tg3FwRodata[0];
	info.data_base = TG3_FW_DATA_ADDR;
	info.data_len = TG3_FW_DATA_LEN;
	info.data_data = NULL;	/* .data section is all zeros */

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);

	/* Retry up to 5 times if the PC does not stick. */
	for (i = 0; i < 5; i++) {
		if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
			break;
		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
		udelay(1000);
	}
	if (i >= 5) {
		printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
		       "to set RX CPU PC, is %08x should be %08x\n",
		       tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
		       TG3_FW_TEXT_ADDR);
		return -ENODEV;
	}
	/* Release the CPU from halt. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);

	return 0;
}
5917

/* Layout of the built-in TSO firmware image that follows.
 * NOTE(review): "RELASE" in TG3_TSO_FW_RELASE_MINOR is a historical
 * typo; the macro name is kept as-is in case other code references it.
 */
#define TG3_TSO_FW_RELEASE_MAJOR	0x1
#define TG3_TSO_FW_RELASE_MINOR	0x6
#define TG3_TSO_FW_RELEASE_FIX	0x0
#define TG3_TSO_FW_START_ADDR	0x08000000
#define TG3_TSO_FW_TEXT_ADDR	0x08000000
#define TG3_TSO_FW_TEXT_LEN	0x1aa0
#define TG3_TSO_FW_RODATA_ADDR	0x08001aa0
#define TG3_TSO_FW_RODATA_LEN	0x60
#define TG3_TSO_FW_DATA_ADDR	0x08001b20
#define TG3_TSO_FW_DATA_LEN	0x30
#define TG3_TSO_FW_SBSS_ADDR	0x08001b50
#define TG3_TSO_FW_SBSS_LEN	0x2c
#define TG3_TSO_FW_BSS_ADDR	0x08001b80
#define TG3_TSO_FW_BSS_LEN	0x894
50da859d 5934static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
1da177e4
LT
5935 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5936 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5937 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5938 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5939 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5940 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5941 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5942 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5943 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5944 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5945 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5946 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5947 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5948 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5949 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5950 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5951 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5952 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5953 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5954 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5955 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5956 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5957 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5958 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5959 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5960 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5961 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5962 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5963 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5964 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5965 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5966 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5967 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5968 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5969 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5970 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5971 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5972 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5973 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5974 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5975 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5976 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5977 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5978 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5979 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5980 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5981 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5982 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5983 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5984 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5985 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5986 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5987 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5988 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5989 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5990 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5991 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5992 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5993 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5994 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5995 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5996 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5997 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5998 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5999 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
6000 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
6001 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
6002 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
6003 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
6004 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
6005 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
6006 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
6007 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
6008 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
6009 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
6010 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
6011 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
6012 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
6013 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
6014 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
6015 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
6016 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
6017 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
6018 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
6019 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
6020 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
6021 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
6022 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
6023 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
6024 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
6025 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
6026 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
6027 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
6028 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
6029 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
6030 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
6031 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
6032 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
6033 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
6034 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
6035 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
6036 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
6037 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
6038 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
6039 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
6040 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
6041 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
6042 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
6043 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
6044 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
6045 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
6046 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
6047 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
6048 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
6049 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
6050 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
6051 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
6052 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
6053 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
6054 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
6055 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
6056 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
6057 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
6058 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
6059 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
6060 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
6061 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
6062 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
6063 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
6064 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
6065 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
6066 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
6067 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
6068 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
6069 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
6070 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
6071 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
6072 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
6073 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6074 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
6075 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
6076 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
6077 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
6078 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
6079 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
6080 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
6081 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
6082 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
6083 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
6084 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
6085 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
6086 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
6087 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
6088 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
6089 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
6090 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
6091 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
6092 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
6093 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
6094 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
6095 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
6096 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
6097 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
6098 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
6099 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
6100 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
6101 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
6102 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
6103 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
6104 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
6105 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
6106 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
6107 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
6108 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
6109 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
6110 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
6111 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
6112 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
6113 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
6114 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
6115 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
6116 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
6117 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
6118 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
6119 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
6120 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
6121 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
6122 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
6123 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
6124 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
6125 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
6126 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
6127 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
6128 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
6129 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
6130 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
6131 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
6132 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
6133 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
6134 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
6135 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
6136 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
6137 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
6138 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
6139 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
6140 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
6141 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
6142 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
6143 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
6144 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
6145 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
6146 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
6147 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
6148 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
6149 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
6150 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
6151 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
6152 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
6153 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
6154 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
6155 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6156 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
6157 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
6158 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
6159 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
6160 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
6161 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
6162 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
6163 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
6164 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
6165 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
6166 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
6167 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
6168 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
6169 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
6170 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
6171 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
6172 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6173 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
6174 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
6175 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
6176 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
6177 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
6178 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
6179 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
6180 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
6181 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
6182 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
6183 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
6184 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
6185 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
6186 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
6187 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
6188 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
6189 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
6190 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
6191 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
6192 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
6193 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
6194 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
6195 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
6196 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
6197 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
6198 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
6199 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6200 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
6201 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
6202 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
6203 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
6204 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
6205 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
6206 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
6207 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
6208 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
6209 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
6210 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
6211 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
6212 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
6213 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
6214 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
6215 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
6216 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
6217 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
6218 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
6219};
6220
50da859d 6221static const u32 tg3TsoFwRodata[] = {
1da177e4
LT
6222 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6223 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
6224 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
6225 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
6226 0x00000000,
6227};
6228
50da859d 6229static const u32 tg3TsoFwData[] = {
1da177e4
LT
6230 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
6231 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6232 0x00000000,
6233};
6234
/* 5705 needs a special version of the TSO firmware. */
/*
 * Layout of the 5705-specific TSO firmware image (version 1.2.0) in
 * NIC SRAM.  Segments are packed back-to-back from 0x00010000:
 * text (0xe90 bytes), rodata at 0x00010e90 (0x50), data at 0x00010f00
 * (0x20), then sbss/bss, which have no initialized image here -- their
 * lengths are only added into the scratch-size computation in
 * tg3_load_tso_firmware().
 *
 * NOTE(review): "RELASE" below is a long-standing typo for "RELEASE".
 * The misspelled name may be referenced elsewhere in this file, so it
 * is deliberately left unchanged -- audit all users before renaming.
 */
#define TG3_TSO5_FW_RELEASE_MAJOR 0x1
#define TG3_TSO5_FW_RELASE_MINOR 0x2
#define TG3_TSO5_FW_RELEASE_FIX 0x0
#define TG3_TSO5_FW_START_ADDR 0x00010000
#define TG3_TSO5_FW_TEXT_ADDR 0x00010000
#define TG3_TSO5_FW_TEXT_LEN 0xe90
#define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
#define TG3_TSO5_FW_RODATA_LEN 0x50
#define TG3_TSO5_FW_DATA_ADDR 0x00010f00
#define TG3_TSO5_FW_DATA_LEN 0x20
#define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
#define TG3_TSO5_FW_SBSS_LEN 0x28
#define TG3_TSO5_FW_BSS_ADDR 0x00010f50
#define TG3_TSO5_FW_BSS_LEN 0x88
6250
50da859d 6251static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
1da177e4
LT
6252 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
6253 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
6254 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6255 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
6256 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
6257 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
6258 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6259 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
6260 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
6261 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
6262 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
6263 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
6264 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
6265 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
6266 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
6267 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
6268 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
6269 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
6270 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
6271 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
6272 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
6273 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
6274 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
6275 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
6276 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
6277 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
6278 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
6279 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
6280 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
6281 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
6282 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6283 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
6284 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
6285 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
6286 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
6287 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
6288 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
6289 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
6290 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
6291 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
6292 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
6293 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
6294 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
6295 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
6296 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
6297 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
6298 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
6299 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
6300 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
6301 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
6302 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
6303 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
6304 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
6305 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
6306 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
6307 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
6308 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
6309 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
6310 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
6311 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
6312 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
6313 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
6314 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
6315 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
6316 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
6317 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
6318 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6319 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
6320 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
6321 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
6322 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
6323 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
6324 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
6325 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
6326 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
6327 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
6328 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
6329 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
6330 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
6331 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
6332 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
6333 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
6334 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
6335 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
6336 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
6337 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
6338 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
6339 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
6340 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
6341 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
6342 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
6343 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
6344 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
6345 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
6346 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
6347 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
6348 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
6349 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
6350 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
6351 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
6352 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
6353 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
6354 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
6355 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
6356 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
6357 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
6358 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6359 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6360 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
6361 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
6362 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
6363 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
6364 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
6365 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
6366 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
6367 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
6368 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
6369 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6370 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6371 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
6372 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
6373 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
6374 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
6375 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6376 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
6377 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
6378 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
6379 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
6380 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
6381 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
6382 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
6383 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
6384 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
6385 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
6386 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
6387 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
6388 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
6389 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
6390 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
6391 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
6392 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
6393 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
6394 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
6395 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
6396 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
6397 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
6398 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
6399 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6400 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
6401 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
6402 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
6403 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6404 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
6405 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
6406 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6407 0x00000000, 0x00000000, 0x00000000,
6408};
6409
/* Read-only data segment of the 5705-family TSO firmware image.
 * The words are ASCII tags used by the firmware itself ("Main"/"CpuB",
 * "Main"/"CpuA", "stko"/"ffld", "fata"/"lErr").  Copied to
 * TG3_TSO5_FW_RODATA_ADDR on the chip by tg3_load_tso_firmware().
 * Do not edit: the contents must match the firmware's text segment.
 */
static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
	0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x00000000,
};
6416
/* Initialized data segment of the 5705-family TSO firmware image;
 * contains the firmware version string "stkoffld_v1.2.0".  Copied to
 * TG3_TSO5_FW_DATA_ADDR on the chip by tg3_load_tso_firmware().
 * Do not edit: the contents must match the firmware's text segment.
 */
static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000,
};
6421
6422/* tp->lock is held. */
6423static int tg3_load_tso_firmware(struct tg3 *tp)
6424{
6425 struct fw_info info;
6426 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6427 int err, i;
6428
6429 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6430 return 0;
6431
6432 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6433 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6434 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6435 info.text_data = &tg3Tso5FwText[0];
6436 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6437 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6438 info.rodata_data = &tg3Tso5FwRodata[0];
6439 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6440 info.data_len = TG3_TSO5_FW_DATA_LEN;
6441 info.data_data = &tg3Tso5FwData[0];
6442 cpu_base = RX_CPU_BASE;
6443 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6444 cpu_scratch_size = (info.text_len +
6445 info.rodata_len +
6446 info.data_len +
6447 TG3_TSO5_FW_SBSS_LEN +
6448 TG3_TSO5_FW_BSS_LEN);
6449 } else {
6450 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6451 info.text_len = TG3_TSO_FW_TEXT_LEN;
6452 info.text_data = &tg3TsoFwText[0];
6453 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6454 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6455 info.rodata_data = &tg3TsoFwRodata[0];
6456 info.data_base = TG3_TSO_FW_DATA_ADDR;
6457 info.data_len = TG3_TSO_FW_DATA_LEN;
6458 info.data_data = &tg3TsoFwData[0];
6459 cpu_base = TX_CPU_BASE;
6460 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6461 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6462 }
6463
6464 err = tg3_load_firmware_cpu(tp, cpu_base,
6465 cpu_scratch_base, cpu_scratch_size,
6466 &info);
6467 if (err)
6468 return err;
6469
6470 /* Now startup the cpu. */
6471 tw32(cpu_base + CPU_STATE, 0xffffffff);
6472 tw32_f(cpu_base + CPU_PC, info.text_base);
6473
6474 for (i = 0; i < 5; i++) {
6475 if (tr32(cpu_base + CPU_PC) == info.text_base)
6476 break;
6477 tw32(cpu_base + CPU_STATE, 0xffffffff);
6478 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
6479 tw32_f(cpu_base + CPU_PC, info.text_base);
6480 udelay(1000);
6481 }
6482 if (i >= 5) {
6483 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
6484 "to set CPU PC, is %08x should be %08x\n",
6485 tp->dev->name, tr32(cpu_base + CPU_PC),
6486 info.text_base);
6487 return -ENODEV;
6488 }
6489 tw32(cpu_base + CPU_STATE, 0xffffffff);
6490 tw32_f(cpu_base + CPU_MODE, 0x00000000);
6491 return 0;
6492}
6493
1da177e4
LT
6494
6495/* tp->lock is held. */
986e0aeb 6496static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
1da177e4
LT
6497{
6498 u32 addr_high, addr_low;
6499 int i;
6500
6501 addr_high = ((tp->dev->dev_addr[0] << 8) |
6502 tp->dev->dev_addr[1]);
6503 addr_low = ((tp->dev->dev_addr[2] << 24) |
6504 (tp->dev->dev_addr[3] << 16) |
6505 (tp->dev->dev_addr[4] << 8) |
6506 (tp->dev->dev_addr[5] << 0));
6507 for (i = 0; i < 4; i++) {
986e0aeb
MC
6508 if (i == 1 && skip_mac_1)
6509 continue;
1da177e4
LT
6510 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6511 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6512 }
6513
6514 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6515 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6516 for (i = 0; i < 12; i++) {
6517 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6518 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6519 }
6520 }
6521
6522 addr_high = (tp->dev->dev_addr[0] +
6523 tp->dev->dev_addr[1] +
6524 tp->dev->dev_addr[2] +
6525 tp->dev->dev_addr[3] +
6526 tp->dev->dev_addr[4] +
6527 tp->dev->dev_addr[5]) &
6528 TX_BACKOFF_SEED_MASK;
6529 tw32(MAC_TX_BACKOFF_SEED, addr_high);
6530}
6531
6532static int tg3_set_mac_addr(struct net_device *dev, void *p)
6533{
6534 struct tg3 *tp = netdev_priv(dev);
6535 struct sockaddr *addr = p;
986e0aeb 6536 int err = 0, skip_mac_1 = 0;
1da177e4 6537
f9804ddb
MC
6538 if (!is_valid_ether_addr(addr->sa_data))
6539 return -EINVAL;
6540
1da177e4
LT
6541 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6542
e75f7c90
MC
6543 if (!netif_running(dev))
6544 return 0;
6545
58712ef9 6546 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
986e0aeb 6547 u32 addr0_high, addr0_low, addr1_high, addr1_low;
58712ef9 6548
986e0aeb
MC
6549 addr0_high = tr32(MAC_ADDR_0_HIGH);
6550 addr0_low = tr32(MAC_ADDR_0_LOW);
6551 addr1_high = tr32(MAC_ADDR_1_HIGH);
6552 addr1_low = tr32(MAC_ADDR_1_LOW);
6553
6554 /* Skip MAC addr 1 if ASF is using it. */
6555 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6556 !(addr1_high == 0 && addr1_low == 0))
6557 skip_mac_1 = 1;
58712ef9 6558 }
986e0aeb
MC
6559 spin_lock_bh(&tp->lock);
6560 __tg3_set_mac_addr(tp, skip_mac_1);
6561 spin_unlock_bh(&tp->lock);
1da177e4 6562
b9ec6c1b 6563 return err;
1da177e4
LT
6564}
6565
6566/* tp->lock is held. */
6567static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6568 dma_addr_t mapping, u32 maxlen_flags,
6569 u32 nic_addr)
6570{
6571 tg3_write_mem(tp,
6572 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6573 ((u64) mapping >> 32));
6574 tg3_write_mem(tp,
6575 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6576 ((u64) mapping & 0xffffffff));
6577 tg3_write_mem(tp,
6578 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6579 maxlen_flags);
6580
6581 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6582 tg3_write_mem(tp,
6583 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6584 nic_addr);
6585}
6586
6587static void __tg3_set_rx_mode(struct net_device *);
d244c892 6588static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
15f9850d
DM
6589{
6590 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6591 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6592 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6593 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6594 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6595 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6596 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6597 }
6598 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6599 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6600 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6601 u32 val = ec->stats_block_coalesce_usecs;
6602
6603 if (!netif_carrier_ok(tp->dev))
6604 val = 0;
6605
6606 tw32(HOSTCC_STAT_COAL_TICKS, val);
6607 }
6608}
1da177e4
LT
6609
6610/* tp->lock is held. */
8e7a22e3 6611static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
1da177e4
LT
6612{
6613 u32 val, rdmac_mode;
6614 int i, err, limit;
6615
6616 tg3_disable_ints(tp);
6617
6618 tg3_stop_fw(tp);
6619
6620 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6621
6622 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
e6de8ad1 6623 tg3_abort_hw(tp, 1);
1da177e4
LT
6624 }
6625
36da4d86 6626 if (reset_phy)
d4d2c558
MC
6627 tg3_phy_reset(tp);
6628
1da177e4
LT
6629 err = tg3_chip_reset(tp);
6630 if (err)
6631 return err;
6632
6633 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6634
b5af7126
MC
6635 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
6636 tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
d30cdd28
MC
6637 val = tr32(TG3_CPMU_CTRL);
6638 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
6639 tw32(TG3_CPMU_CTRL, val);
9acb961e
MC
6640
6641 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
6642 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
6643 val |= CPMU_LSPD_10MB_MACCLK_6_25;
6644 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
6645
6646 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
6647 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
6648 val |= CPMU_LNK_AWARE_MACCLK_6_25;
6649 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
6650
6651 val = tr32(TG3_CPMU_HST_ACC);
6652 val &= ~CPMU_HST_ACC_MACCLK_MASK;
6653 val |= CPMU_HST_ACC_MACCLK_6_25;
6654 tw32(TG3_CPMU_HST_ACC, val);
d30cdd28
MC
6655 }
6656
1da177e4
LT
6657 /* This works around an issue with Athlon chipsets on
6658 * B3 tigon3 silicon. This bit has no effect on any
6659 * other revision. But do not set this on PCI Express
795d01c5 6660 * chips and don't even touch the clocks if the CPMU is present.
1da177e4 6661 */
795d01c5
MC
6662 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
6663 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6664 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6665 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6666 }
1da177e4
LT
6667
6668 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6669 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6670 val = tr32(TG3PCI_PCISTATE);
6671 val |= PCISTATE_RETRY_SAME_DMA;
6672 tw32(TG3PCI_PCISTATE, val);
6673 }
6674
0d3031d9
MC
6675 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6676 /* Allow reads and writes to the
6677 * APE register and memory space.
6678 */
6679 val = tr32(TG3PCI_PCISTATE);
6680 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6681 PCISTATE_ALLOW_APE_SHMEM_WR;
6682 tw32(TG3PCI_PCISTATE, val);
6683 }
6684
1da177e4
LT
6685 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6686 /* Enable some hw fixes. */
6687 val = tr32(TG3PCI_MSI_DATA);
6688 val |= (1 << 26) | (1 << 28) | (1 << 29);
6689 tw32(TG3PCI_MSI_DATA, val);
6690 }
6691
6692 /* Descriptor ring init may make accesses to the
6693 * NIC SRAM area to setup the TX descriptors, so we
6694 * can only do this after the hardware has been
6695 * successfully reset.
6696 */
32d8c572
MC
6697 err = tg3_init_rings(tp);
6698 if (err)
6699 return err;
1da177e4 6700
9936bcf6
MC
6701 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
6702 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
d30cdd28
MC
6703 /* This value is determined during the probe time DMA
6704 * engine test, tg3_test_dma.
6705 */
6706 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6707 }
1da177e4
LT
6708
6709 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6710 GRC_MODE_4X_NIC_SEND_RINGS |
6711 GRC_MODE_NO_TX_PHDR_CSUM |
6712 GRC_MODE_NO_RX_PHDR_CSUM);
6713 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
d2d746f8
MC
6714
6715 /* Pseudo-header checksum is done by hardware logic and not
6716 * the offload processers, so make the chip do the pseudo-
6717 * header checksums on receive. For transmit it is more
6718 * convenient to do the pseudo-header checksum in software
6719 * as Linux does that on transmit for us in all cases.
6720 */
6721 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
1da177e4
LT
6722
6723 tw32(GRC_MODE,
6724 tp->grc_mode |
6725 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6726
6727 /* Setup the timer prescalar register. Clock is always 66Mhz. */
6728 val = tr32(GRC_MISC_CFG);
6729 val &= ~0xff;
6730 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6731 tw32(GRC_MISC_CFG, val);
6732
6733 /* Initialize MBUF/DESC pool. */
cbf46853 6734 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
1da177e4
LT
6735 /* Do nothing. */
6736 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6737 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6738 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6739 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6740 else
6741 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6742 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6743 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6744 }
1da177e4
LT
6745 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6746 int fw_len;
6747
6748 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6749 TG3_TSO5_FW_RODATA_LEN +
6750 TG3_TSO5_FW_DATA_LEN +
6751 TG3_TSO5_FW_SBSS_LEN +
6752 TG3_TSO5_FW_BSS_LEN);
6753 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6754 tw32(BUFMGR_MB_POOL_ADDR,
6755 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6756 tw32(BUFMGR_MB_POOL_SIZE,
6757 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6758 }
1da177e4 6759
0f893dc6 6760 if (tp->dev->mtu <= ETH_DATA_LEN) {
1da177e4
LT
6761 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6762 tp->bufmgr_config.mbuf_read_dma_low_water);
6763 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6764 tp->bufmgr_config.mbuf_mac_rx_low_water);
6765 tw32(BUFMGR_MB_HIGH_WATER,
6766 tp->bufmgr_config.mbuf_high_water);
6767 } else {
6768 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6769 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6770 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6771 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6772 tw32(BUFMGR_MB_HIGH_WATER,
6773 tp->bufmgr_config.mbuf_high_water_jumbo);
6774 }
6775 tw32(BUFMGR_DMA_LOW_WATER,
6776 tp->bufmgr_config.dma_low_water);
6777 tw32(BUFMGR_DMA_HIGH_WATER,
6778 tp->bufmgr_config.dma_high_water);
6779
6780 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6781 for (i = 0; i < 2000; i++) {
6782 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6783 break;
6784 udelay(10);
6785 }
6786 if (i >= 2000) {
6787 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6788 tp->dev->name);
6789 return -ENODEV;
6790 }
6791
6792 /* Setup replenish threshold. */
f92905de
MC
6793 val = tp->rx_pending / 8;
6794 if (val == 0)
6795 val = 1;
6796 else if (val > tp->rx_std_max_post)
6797 val = tp->rx_std_max_post;
b5d3772c
MC
6798 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6799 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6800 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6801
6802 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6803 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6804 }
f92905de
MC
6805
6806 tw32(RCVBDI_STD_THRESH, val);
1da177e4
LT
6807
6808 /* Initialize TG3_BDINFO's at:
6809 * RCVDBDI_STD_BD: standard eth size rx ring
6810 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
6811 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
6812 *
6813 * like so:
6814 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
6815 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
6816 * ring attribute flags
6817 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
6818 *
6819 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6820 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6821 *
6822 * The size of each ring is fixed in the firmware, but the location is
6823 * configurable.
6824 */
6825 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6826 ((u64) tp->rx_std_mapping >> 32));
6827 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6828 ((u64) tp->rx_std_mapping & 0xffffffff));
6829 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6830 NIC_SRAM_RX_BUFFER_DESC);
6831
6832 /* Don't even try to program the JUMBO/MINI buffer descriptor
6833 * configs on 5705.
6834 */
6835 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6836 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6837 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6838 } else {
6839 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6840 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6841
6842 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6843 BDINFO_FLAGS_DISABLED);
6844
6845 /* Setup replenish threshold. */
6846 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6847
0f893dc6 6848 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
1da177e4
LT
6849 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6850 ((u64) tp->rx_jumbo_mapping >> 32));
6851 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6852 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6853 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6854 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6855 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6856 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6857 } else {
6858 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6859 BDINFO_FLAGS_DISABLED);
6860 }
6861
6862 }
6863
6864 /* There is only one send ring on 5705/5750, no need to explicitly
6865 * disable the others.
6866 */
6867 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6868 /* Clear out send RCB ring in SRAM. */
6869 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6870 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6871 BDINFO_FLAGS_DISABLED);
6872 }
6873
6874 tp->tx_prod = 0;
6875 tp->tx_cons = 0;
6876 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6877 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6878
6879 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6880 tp->tx_desc_mapping,
6881 (TG3_TX_RING_SIZE <<
6882 BDINFO_FLAGS_MAXLEN_SHIFT),
6883 NIC_SRAM_TX_BUFFER_DESC);
6884
6885 /* There is only one receive return ring on 5705/5750, no need
6886 * to explicitly disable the others.
6887 */
6888 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6889 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6890 i += TG3_BDINFO_SIZE) {
6891 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6892 BDINFO_FLAGS_DISABLED);
6893 }
6894 }
6895
6896 tp->rx_rcb_ptr = 0;
6897 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6898
6899 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6900 tp->rx_rcb_mapping,
6901 (TG3_RX_RCB_RING_SIZE(tp) <<
6902 BDINFO_FLAGS_MAXLEN_SHIFT),
6903 0);
6904
6905 tp->rx_std_ptr = tp->rx_pending;
6906 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6907 tp->rx_std_ptr);
6908
0f893dc6 6909 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
1da177e4
LT
6910 tp->rx_jumbo_pending : 0;
6911 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6912 tp->rx_jumbo_ptr);
6913
6914 /* Initialize MAC address and backoff seed. */
986e0aeb 6915 __tg3_set_mac_addr(tp, 0);
1da177e4
LT
6916
6917 /* MTU + ethernet header + FCS + optional VLAN tag */
6918 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6919
6920 /* The slot time is changed by tg3_setup_phy if we
6921 * run at gigabit with half duplex.
6922 */
6923 tw32(MAC_TX_LENGTHS,
6924 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6925 (6 << TX_LENGTHS_IPG_SHIFT) |
6926 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6927
6928 /* Receive rules. */
6929 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6930 tw32(RCVLPC_CONFIG, 0x0181);
6931
6932 /* Calculate RDMAC_MODE setting early, we need it to determine
6933 * the RCVLPC_STATE_ENABLE mask.
6934 */
6935 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6936 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6937 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6938 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6939 RDMAC_MODE_LNGREAD_ENAB);
85e94ced 6940
d30cdd28
MC
6941 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
6942 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
6943 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
6944 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
6945
85e94ced
MC
6946 /* If statement applies to 5705 and 5750 PCI devices only */
6947 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6948 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6949 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
1da177e4 6950 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
c13e3713 6951 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1da177e4
LT
6952 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6953 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6954 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6955 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6956 }
6957 }
6958
85e94ced
MC
6959 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6960 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6961
1da177e4
LT
6962 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6963 rdmac_mode |= (1 << 27);
1da177e4
LT
6964
6965 /* Receive/send statistics. */
1661394e
MC
6966 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6967 val = tr32(RCVLPC_STATS_ENABLE);
6968 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6969 tw32(RCVLPC_STATS_ENABLE, val);
6970 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6971 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
1da177e4
LT
6972 val = tr32(RCVLPC_STATS_ENABLE);
6973 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6974 tw32(RCVLPC_STATS_ENABLE, val);
6975 } else {
6976 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6977 }
6978 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6979 tw32(SNDDATAI_STATSENAB, 0xffffff);
6980 tw32(SNDDATAI_STATSCTRL,
6981 (SNDDATAI_SCTRL_ENABLE |
6982 SNDDATAI_SCTRL_FASTUPD));
6983
6984 /* Setup host coalescing engine. */
6985 tw32(HOSTCC_MODE, 0);
6986 for (i = 0; i < 2000; i++) {
6987 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6988 break;
6989 udelay(10);
6990 }
6991
d244c892 6992 __tg3_set_coalesce(tp, &tp->coal);
1da177e4
LT
6993
6994 /* set status block DMA address */
6995 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6996 ((u64) tp->status_mapping >> 32));
6997 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6998 ((u64) tp->status_mapping & 0xffffffff));
6999
7000 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7001 /* Status/statistics block address. See tg3_timer,
7002 * the tg3_periodic_fetch_stats call there, and
7003 * tg3_get_stats to see how this works for 5705/5750 chips.
7004 */
1da177e4
LT
7005 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7006 ((u64) tp->stats_mapping >> 32));
7007 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7008 ((u64) tp->stats_mapping & 0xffffffff));
7009 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7010 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7011 }
7012
7013 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7014
7015 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7016 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7017 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7018 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7019
7020 /* Clear statistics/status block in chip, and status block in ram. */
7021 for (i = NIC_SRAM_STATS_BLK;
7022 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7023 i += sizeof(u32)) {
7024 tg3_write_mem(tp, i, 0);
7025 udelay(40);
7026 }
7027 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
7028
c94e3941
MC
7029 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7030 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7031 /* reset to prevent losing 1st rx packet intermittently */
7032 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7033 udelay(10);
7034 }
7035
1da177e4
LT
7036 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
7037 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
e8f3f6ca
MC
7038 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7039 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7040 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7041 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1da177e4
LT
7042 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7043 udelay(40);
7044
314fba34 7045 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9d26e213 7046 * If TG3_FLG2_IS_NIC is zero, we should read the
314fba34
MC
7047 * register to preserve the GPIO settings for LOMs. The GPIOs,
7048 * whether used as inputs or outputs, are set by boot code after
7049 * reset.
7050 */
9d26e213 7051 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
314fba34
MC
7052 u32 gpio_mask;
7053
9d26e213
MC
7054 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7055 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7056 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
3e7d83bc
MC
7057
7058 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7059 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7060 GRC_LCLCTRL_GPIO_OUTPUT3;
7061
af36e6b6
MC
7062 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7063 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7064
aaf84465 7065 tp->grc_local_ctrl &= ~gpio_mask;
314fba34
MC
7066 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7067
7068 /* GPIO1 must be driven high for eeprom write protect */
9d26e213
MC
7069 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7070 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7071 GRC_LCLCTRL_GPIO_OUTPUT1);
314fba34 7072 }
1da177e4
LT
7073 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7074 udelay(100);
7075
09ee929c 7076 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
fac9b83e 7077 tp->last_tag = 0;
1da177e4
LT
7078
7079 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7080 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7081 udelay(40);
7082 }
7083
7084 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7085 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7086 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7087 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7088 WDMAC_MODE_LNGREAD_ENAB);
7089
85e94ced
MC
7090 /* If statement applies to 5705 and 5750 PCI devices only */
7091 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7092 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7093 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
1da177e4
LT
7094 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
7095 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7096 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7097 /* nothing */
7098 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7099 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7100 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7101 val |= WDMAC_MODE_RX_ACCEL;
7102 }
7103 }
7104
d9ab5ad1 7105 /* Enable host coalescing bug fix */
af36e6b6 7106 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
d30cdd28 7107 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
9936bcf6
MC
7108 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
7109 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761))
d9ab5ad1
MC
7110 val |= (1 << 29);
7111
1da177e4
LT
7112 tw32_f(WDMAC_MODE, val);
7113 udelay(40);
7114
9974a356
MC
7115 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7116 u16 pcix_cmd;
7117
7118 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7119 &pcix_cmd);
1da177e4 7120 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9974a356
MC
7121 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7122 pcix_cmd |= PCI_X_CMD_READ_2K;
1da177e4 7123 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9974a356
MC
7124 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7125 pcix_cmd |= PCI_X_CMD_READ_2K;
1da177e4 7126 }
9974a356
MC
7127 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7128 pcix_cmd);
1da177e4
LT
7129 }
7130
7131 tw32_f(RDMAC_MODE, rdmac_mode);
7132 udelay(40);
7133
7134 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7135 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7136 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9936bcf6
MC
7137
7138 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7139 tw32(SNDDATAC_MODE,
7140 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7141 else
7142 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7143
1da177e4
LT
7144 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7145 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7146 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7147 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
1da177e4
LT
7148 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7149 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
1da177e4
LT
7150 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7151 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7152
7153 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7154 err = tg3_load_5701_a0_firmware_fix(tp);
7155 if (err)
7156 return err;
7157 }
7158
1da177e4
LT
7159 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7160 err = tg3_load_tso_firmware(tp);
7161 if (err)
7162 return err;
7163 }
1da177e4
LT
7164
7165 tp->tx_mode = TX_MODE_ENABLE;
7166 tw32_f(MAC_TX_MODE, tp->tx_mode);
7167 udelay(100);
7168
7169 tp->rx_mode = RX_MODE_ENABLE;
9936bcf6
MC
7170 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7171 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
af36e6b6
MC
7172 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7173
1da177e4
LT
7174 tw32_f(MAC_RX_MODE, tp->rx_mode);
7175 udelay(10);
7176
7177 if (tp->link_config.phy_is_low_power) {
7178 tp->link_config.phy_is_low_power = 0;
7179 tp->link_config.speed = tp->link_config.orig_speed;
7180 tp->link_config.duplex = tp->link_config.orig_duplex;
7181 tp->link_config.autoneg = tp->link_config.orig_autoneg;
7182 }
7183
8ef21428 7184 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1da177e4
LT
7185 tw32_f(MAC_MI_MODE, tp->mi_mode);
7186 udelay(80);
7187
7188 tw32(MAC_LED_CTRL, tp->led_ctrl);
7189
7190 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
c94e3941 7191 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
1da177e4
LT
7192 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7193 udelay(10);
7194 }
7195 tw32_f(MAC_RX_MODE, tp->rx_mode);
7196 udelay(10);
7197
7198 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7199 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7200 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7201 /* Set drive transmission level to 1.2V */
7202 /* only if the signal pre-emphasis bit is not set */
7203 val = tr32(MAC_SERDES_CFG);
7204 val &= 0xfffff000;
7205 val |= 0x880;
7206 tw32(MAC_SERDES_CFG, val);
7207 }
7208 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7209 tw32(MAC_SERDES_CFG, 0x616000);
7210 }
7211
7212 /* Prevent chip from dropping frames when flow control
7213 * is enabled.
7214 */
7215 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7216
7217 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7218 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7219 /* Use hardware link auto-negotiation */
7220 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7221 }
7222
d4d2c558
MC
7223 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7224 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7225 u32 tmp;
7226
7227 tmp = tr32(SERDES_RX_CTRL);
7228 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7229 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7230 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7231 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7232 }
7233
36da4d86 7234 err = tg3_setup_phy(tp, 0);
1da177e4
LT
7235 if (err)
7236 return err;
7237
715116a1
MC
7238 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7239 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
1da177e4
LT
7240 u32 tmp;
7241
7242 /* Clear CRC stats. */
569a5df8
MC
7243 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7244 tg3_writephy(tp, MII_TG3_TEST1,
7245 tmp | MII_TG3_TEST1_CRC_EN);
1da177e4
LT
7246 tg3_readphy(tp, 0x14, &tmp);
7247 }
7248 }
7249
7250 __tg3_set_rx_mode(tp->dev);
7251
7252 /* Initialize receive rules. */
7253 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
7254 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7255 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
7256 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7257
4cf78e4f 7258 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
a4e2b347 7259 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
1da177e4
LT
7260 limit = 8;
7261 else
7262 limit = 16;
7263 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7264 limit -= 4;
7265 switch (limit) {
7266 case 16:
7267 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
7268 case 15:
7269 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
7270 case 14:
7271 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
7272 case 13:
7273 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
7274 case 12:
7275 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
7276 case 11:
7277 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
7278 case 10:
7279 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
7280 case 9:
7281 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
7282 case 8:
7283 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
7284 case 7:
7285 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
7286 case 6:
7287 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
7288 case 5:
7289 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
7290 case 4:
7291 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
7292 case 3:
7293 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
7294 case 2:
7295 case 1:
7296
7297 default:
7298 break;
7299 };
7300
9ce768ea
MC
7301 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7302 /* Write our heartbeat update interval to APE. */
7303 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7304 APE_HOST_HEARTBEAT_INT_DISABLE);
0d3031d9 7305
1da177e4
LT
7306 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7307
1da177e4
LT
7308 return 0;
7309}
7310
/* Called at device open time to get the chip ready for
 * packet processing. Invoked with tp->lock held.
 *
 * Powers the chip up to D0, switches core clocks, resets the memory
 * window and then performs the full hardware reset/initialization.
 * Returns 0 on success or a negative errno from the power-state or
 * reset paths.
 */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
	int err;

	/* Force the chip into D0. */
	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		goto out;

	tg3_switch_clocks(tp);

	/* Reset the on-chip memory window base before tg3_reset_hw()
	 * starts programming SRAM locations.
	 */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	err = tg3_reset_hw(tp, reset_phy);

out:
	return err;
}
7332
/* Accumulate the 32-bit hardware statistics register REG into the
 * 64-bit software counter PSTAT.  A carry into the high word is
 * detected via unsigned wrap-around of the low word.
 * NOTE(review): assumes reading REG returns the delta since the last
 * read (clear-on-read) -- otherwise periodic fetches would double
 * count; confirm against the chip documentation.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
7339
/* Fold the chip's MAC TX/RX and receive-list-placement statistics
 * registers into the host-resident stats block (tp->hw_stats).
 * Called once per second from tg3_timer() on 5705+ chips, with
 * tp->lock held.  Skipped while the link is down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))
		return;

	/* Transmit-side MAC counters. */
	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	/* Receive-side MAC counters. */
	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	/* Receive list placement counters. */
	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
7380
/* Periodic driver timer (fires every tp->timer_offset jiffies).
 *
 * Responsibilities:
 *  - on non-tagged-status chips, work around the racy mailbox/status
 *    block protocol and detect a dead write-DMA engine (schedules a
 *    full reset via tp->reset_task if WDMAC has stopped);
 *  - once per second: fetch hardware statistics (5705+) and poll for
 *    link/PHY state changes depending on the link-detection mode;
 *  - every two seconds: send the ASF heartbeat to the firmware.
 *
 * Runs in timer (softirq) context; chip access is serialized with
 * spin_lock(&tp->lock).  If an irq_sync quiesce is in progress the
 * body is skipped and the timer simply re-arms itself.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);

	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
		}

		/* If the write DMA engine stopped, the chip is wedged;
		 * hand off to the reset workqueue (can't reset here in
		 * softirq context) and bail without re-arming -- the
		 * reset task restarts the timer.
		 */
		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
			tg3_periodic_fetch_stats(tp);

		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (! netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				/* Bounce the port mode to re-sync the
				 * SerDes link before renegotiating.
				 */
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_serdes_parallel_detect(tp);

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
			u32 val;

			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			/* 5 seconds timeout */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
			val = tr32(GRC_RX_CPU_EVENT);
			val |= GRC_RX_CPU_DRIVER_EVENT;
			tw32_f(GRC_RX_CPU_EVENT, val);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
7502
81789ef5 7503static int tg3_request_irq(struct tg3 *tp)
fcfa0a32 7504{
7d12e780 7505 irq_handler_t fn;
fcfa0a32
MC
7506 unsigned long flags;
7507 struct net_device *dev = tp->dev;
7508
7509 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7510 fn = tg3_msi;
7511 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7512 fn = tg3_msi_1shot;
1fb9df5d 7513 flags = IRQF_SAMPLE_RANDOM;
fcfa0a32
MC
7514 } else {
7515 fn = tg3_interrupt;
7516 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7517 fn = tg3_interrupt_tagged;
1fb9df5d 7518 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
fcfa0a32
MC
7519 }
7520 return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
7521}
7522
/* Verify that the chip can actually deliver an interrupt with the
 * current configuration (used by the MSI self-test).
 *
 * Temporarily replaces the normal handler with tg3_test_isr, forces a
 * host-coalescing "interrupt now" event, and polls for up to ~50ms for
 * evidence that the interrupt fired (non-zero interrupt mailbox, or
 * the handler having masked PCI interrupts in MISC_HOST_CTRL).  The
 * normal handler is always restored before returning.
 *
 * Returns 0 if an interrupt was seen, -EIO if not, -ENODEV if the
 * device is down, or an error from request_irq().
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tp->pdev->irq, dev);

	err = request_irq(tp->pdev->irq, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
	if (err)
		return err;

	/* Clear any stale "status updated" indication before arming. */
	tp->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Force the coalescing engine to generate an interrupt now. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       HOSTCC_MODE_NOW);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
					TG3_64BIT_REG_LOW);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		msleep(10);
	}

	tg3_disable_ints(tp);

	/* Put the regular interrupt handler back. */
	free_irq(tp->pdev->irq, dev);

	err = tg3_request_irq(tp);

	if (err)
		return err;

	if (intr_ok)
		return 0;

	return -EIO;
}
7576
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored.
 *
 * Runs tg3_test_interrupt() with SERR reporting temporarily disabled
 * (an MSI cycle may terminate with Master Abort on broken chipsets).
 * On -EIO the driver falls back to INTx: the MSI vector is released,
 * the legacy handler is installed, and the chip is halted and fully
 * re-initialized because the failed MSI may have left it in a bad
 * state.  Any other error is propagated unchanged.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err;
	u16 pci_cmd;

	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the saved PCI command word (re-enables SERR). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
	       "switching to INTx mode. Please report this failure to "
	       "the PCI maintainer and include system chipset information.\n",
	       tp->dev->name);

	free_irq(tp->pdev->irq, dev);
	pci_disable_msi(tp->pdev);

	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

	err = tg3_request_irq(tp);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->pdev->irq, dev);

	return err;
}
7637
/* net_device ->open() handler: bring the interface up.
 *
 * Sequence: power the chip to D0, allocate DMA-consistent rings and
 * status/stats blocks, enable MSI when supported, install the IRQ
 * handler, initialize the hardware, run the MSI self-test (falling
 * back to INTx on failure), then arm the periodic timer, enable
 * interrupts and start the TX queue.  Every failure path unwinds the
 * steps taken so far.  Returns 0 or a negative errno.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	netif_carrier_off(tp->dev);

	tg3_full_lock(tp, 0);

	err = tg3_set_power_state(tp, PCI_D0);
	if (err) {
		tg3_full_unlock(tp);
		return err;
	}

	tg3_disable_ints(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		return err;

	if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
			printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
			       "Not using MSI.\n", tp->dev->name);
		} else if (pci_enable_msi(tp->pdev) == 0) {
			u32 msi_mode;

			msi_mode = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
			tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
		}
	}
	err = tg3_request_irq(tp);

	if (err) {
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	napi_enable(&tp->napi);

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		/* Tagged status allows a slower (1s) poll; otherwise the
		 * racy non-tagged protocol is polled at 10Hz.
		 */
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		/* ASF heartbeat fires at half the once-per-second rate. */
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err) {
		napi_disable(&tp->napi);
		free_irq(tp->pdev->irq, dev);
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		err = tg3_test_msi(tp);

		if (err) {
			/* MSI test failed AND fallback to INTx failed:
			 * tear everything down.
			 */
			tg3_full_lock(tp, 0);

			if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
				pci_disable_msi(tp->pdev);
				tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
			}
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_free_consistent(tp);

			tg3_full_unlock(tp);

			napi_disable(&tp->napi);

			return err;
		}

		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
				u32 val = tr32(PCIE_TRANSACTION_CFG);

				tw32(PCIE_TRANSACTION_CFG,
				     val | PCIE_TRANS_CFG_1SHOT_MSI);
			}
		}
	}

	tg3_full_lock(tp, 0);

	add_timer(&tp->timer);
	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_start_queue(dev);

	return 0;
}
7773
#if 0
/* Debug-only register/state dump, compiled out by default.  Prints PCI
 * status, all major block mode/status registers, the TG3_BDINFO
 * structures, SRAM control blocks, the host status/statistics blocks,
 * mailboxes and the first few NIC-side TX/RX descriptors.  To use it,
 * enable this #if 0 together with the call site in tg3_close().
 */
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;
	int i;

	pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
	printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
	       val16, val32);

	/* MAC block */
	printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
	       tr32(MAC_MODE), tr32(MAC_STATUS));
	printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
	       tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
	printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
	       tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
	printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

	/* Send data initiator control block */
	printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
	       tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
	printk("       SNDDATAI_STATSCTRL[%08x]\n",
	       tr32(SNDDATAI_STATSCTRL));

	/* Send data completion control block */
	printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

	/* Send BD ring selector block */
	printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
	       tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

	/* Send BD initiator control block */
	printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
	       tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

	/* Send BD completion control block */
	printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

	/* Receive list placement control block */
	printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
	       tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
	printk("       RCVLPC_STATSCTRL[%08x]\n",
	       tr32(RCVLPC_STATSCTRL));

	/* Receive data and receive BD initiator control block */
	printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
	       tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

	/* Receive data completion control block */
	printk("DEBUG: RCVDCC_MODE[%08x]\n",
	       tr32(RCVDCC_MODE));

	/* Receive BD initiator control block */
	printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
	       tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

	/* Receive BD completion control block */
	printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
	       tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

	/* Receive list selector control block */
	printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
	       tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

	/* Mbuf cluster free block */
	printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
	       tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

	/* Host coalescing control block */
	printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
	       tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
	printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATS_BLK_NIC_ADDR));
	printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

	/* Memory arbiter control block */
	printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
	       tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

	/* Buffer manager control block */
	printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
	       tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
	printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
	printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
	       "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_DMA_DESC_POOL_ADDR),
	       tr32(BUFMGR_DMA_DESC_POOL_SIZE));

	/* Read DMA control block */
	printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

	/* Write DMA control block */
	printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

	/* DMA completion block */
	printk("DEBUG: DMAC_MODE[%08x]\n",
	       tr32(DMAC_MODE));

	/* GRC block */
	printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
	       tr32(GRC_MODE), tr32(GRC_MISC_CFG));
	printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
	       tr32(GRC_LOCAL_CTRL));

	/* TG3_BDINFOs */
	printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_JUMBO_BD + 0x0),
	       tr32(RCVDBDI_JUMBO_BD + 0x4),
	       tr32(RCVDBDI_JUMBO_BD + 0x8),
	       tr32(RCVDBDI_JUMBO_BD + 0xc));
	printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_STD_BD + 0x0),
	       tr32(RCVDBDI_STD_BD + 0x4),
	       tr32(RCVDBDI_STD_BD + 0x8),
	       tr32(RCVDBDI_STD_BD + 0xc));
	printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_MINI_BD + 0x0),
	       tr32(RCVDBDI_MINI_BD + 0x4),
	       tr32(RCVDBDI_MINI_BD + 0x8),
	       tr32(RCVDBDI_MINI_BD + 0xc));

	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
	printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4, val32_5);

	/* SW status block */
	printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
	       tp->hw_status->status,
	       tp->hw_status->status_tag,
	       tp->hw_status->rx_jumbo_consumer,
	       tp->hw_status->rx_consumer,
	       tp->hw_status->rx_mini_consumer,
	       tp->hw_status->idx[0].rx_producer,
	       tp->hw_status->idx[0].tx_consumer);

	/* SW statistics block */
	printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
	       ((u32 *)tp->hw_stats)[0],
	       ((u32 *)tp->hw_stats)[1],
	       ((u32 *)tp->hw_stats)[2],
	       ((u32 *)tp->hw_stats)[3]);

	/* Mailboxes */
	printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

	/* NIC side send descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long txd;

		txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
			+ (i * sizeof(struct tg3_tx_buffer_desc));
		printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(txd + 0x0), readl(txd + 0x4),
		       readl(txd + 0x8), readl(txd + 0xc));
	}

	/* NIC side RX descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}

	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}
}
#endif
8001
8002static struct net_device_stats *tg3_get_stats(struct net_device *);
8003static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
8004
/* net_device ->close() handler: bring the interface down.
 *
 * Stops NAPI, pending reset work, the TX queue and the periodic timer,
 * halts the chip and frees the rings, releases the IRQ (and MSI vector
 * if in use), snapshots the final statistics into the *_prev copies so
 * counters survive a down/up cycle, frees DMA memory, and drops the
 * chip to D3hot.  Always returns 0.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	napi_disable(&tp->napi);
	/* Make sure a queued reset_task is not running concurrently. */
	cancel_work_sync(&tp->reset_task);

	netif_stop_queue(dev);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
#if 0
	tg3_dump_state(tp);
#endif

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	free_irq(tp->pdev->irq, dev);
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		pci_disable_msi(tp->pdev);
		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	}

	/* Preserve accumulated counters across the down period; the
	 * get routines add hardware deltas on top of these.
	 */
	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	tg3_set_power_state(tp, PCI_D3hot);

	netif_carrier_off(tp->dev);

	return 0;
}
8048
8049static inline unsigned long get_stat64(tg3_stat64_t *val)
8050{
8051 unsigned long ret;
8052
8053#if (BITS_PER_LONG == 32)
8054 ret = val->low;
8055#else
8056 ret = ((u64)val->high << 32) | ((u64)val->low);
8057#endif
8058 return ret;
8059}
8060
/* Return the running count of receive CRC errors.
 *
 * On 5700/5701 copper chips the MAC's rx_fcs_errors counter is not
 * used; instead the PHY's CRC counter is read via MDIO (enabling
 * MII_TG3_TEST1_CRC_EN first, then reading register 0x14) and
 * accumulated into tp->phy_crc_errors under tp->lock.  All other
 * chips simply report the hardware stats block value.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			/* PHY register 0x14 holds the CRC error count. */
			tg3_readphy(tp, 0x14, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
8086
/* Compute one ethtool statistic: the counters saved at the last close
 * (old_estats) plus the current 64-bit hardware stats block value.
 * Relies on `estats', `old_estats' and `hw_stats' locals in the
 * enclosing function (tg3_get_estats).
 */
#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
	get_stat64(&hw_stats->member)
8090
/* Build the full ethtool statistics snapshot in tp->estats: for every
 * counter, add the live hardware stats block value on top of the
 * totals saved at the last interface close (tp->estats_prev).  If the
 * stats block is gone (device closed), return the saved totals as-is.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}
8178
/* net_device ->get_stats() handler: map hardware counters onto the
 * generic net_device_stats fields.  Each field is the value saved at
 * the last close (tp->net_stats_prev) plus the live hardware stats
 * block contribution; several generic fields aggregate multiple
 * hardware counters.  If the stats block is unavailable (device
 * closed), the saved totals are returned unchanged.
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	struct net_device_stats *stats = &tp->net_stats;
	struct net_device_stats *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from the PHY on 5700/5701, see
	 * calc_crc_errors().
	 */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	return stats;
}
8238
8239static inline u32 calc_crc(unsigned char *buf, int len)
8240{
8241 u32 reg;
8242 u32 tmp;
8243 int j, k;
8244
8245 reg = 0xffffffff;
8246
8247 for (j = 0; j < len; j++) {
8248 reg ^= buf[j];
8249
8250 for (k = 0; k < 8; k++) {
8251 tmp = reg & 0x01;
8252
8253 reg >>= 1;
8254
8255 if (tmp) {
8256 reg ^= 0xedb88320;
8257 }
8258 }
8259 }
8260
8261 return ~reg;
8262}
8263
8264static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8265{
8266 /* accept or reject all multicast frames */
8267 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8268 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8269 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8270 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8271}
8272
/* Apply the device's receive filtering policy to the hardware:
 * promiscuous mode, all-multicast, or a CRC-based multicast hash
 * filter, plus VLAN tag stripping.  Caller must hold the chip lock
 * (see tg3_set_rx_mode).
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
#if TG3_VLAN_TAG_USED
	if (!tp->vlgrp &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
	/* By definition, VLAN is disabled always in this
	 * case.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi (tp, 1);
	} else if (dev->mc_count < 1) {
		/* Reject all multicast. */
		tg3_set_multi (tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		unsigned int i;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		/* Hash each multicast address: the low 7 bits of the
		 * inverted CRC select one of 128 filter bits, split as
		 * 2 bits of register index + 5 bits of bit position.
		 */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch MAC_RX_MODE if the mode word actually changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
8336
/* net_device entry point for RX filter changes.  A no-op while the
 * interface is down; otherwise applies the new mode under the full
 * device lock.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
8348
/* Size in bytes of the register dump produced by tg3_get_regs(). */
#define TG3_REGDUMP_LEN		(32 * 1024)

/* ethtool hook: report the buffer size needed for a register dump. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
8355
/* ethtool hook: dump device registers into *_p (TG3_REGDUMP_LEN bytes).
 * Each register is stored at its own offset within the dump buffer, so
 * unread ranges remain zero.  Skipped entirely while the PHY is in low
 * power, since register access is not safe then.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	/* Pre-zero so ranges we do not read stay zero in the dump. */
	memset(p, 0, TG3_REGDUMP_LEN);

	if (tp->link_config.phy_is_low_power)
		return;

	tg3_full_lock(tp, 0);

/* Helper macros: position the output cursor at the register's offset
 * within the dump buffer, then copy one or a range of registers.
 */
#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
#define GET_REG32_1(reg)		\
do {	p = (u32 *)(orig_p + (reg));	\
	__GET_REG32((reg));		\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	tg3_full_unlock(tp);
}
8428
/* ethtool hook: report the size of the device's NVRAM in bytes. */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
8435
8436static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
b9fc7dc5 8437static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
1820180b 8438static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
1da177e4
LT
8439
/* ethtool hook: read eeprom->len bytes of NVRAM starting at
 * eeprom->offset into data.  NVRAM is only word-addressable, so the
 * request is split into an unaligned head, an aligned middle, and an
 * unaligned tail.  eeprom->len is updated to the byte count actually
 * read, even on partial failure.  Returns 0 or a negative errno.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count;
	__le32 val;

	/* NVRAM is not accessible while the PHY is powered down. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		/* Copy only the requested bytes out of the aligned word. */
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes upto the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_le(tp, offset + i, &val);
		if (ret) {
			/* Report how much was successfully read. */
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_le(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
8499
6aa20a22 8500static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
1da177e4
LT
8501
/* ethtool hook: write eeprom->len bytes to NVRAM at eeprom->offset.
 * NVRAM writes must be word-aligned, so for unaligned edges the
 * neighboring words are read first and merged with the user data in a
 * temporary buffer (read-modify-write).  Returns 0 or negative errno.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__le32 start, end;

	/* NVRAM is not accessible while the PHY is powered down. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	if (eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary:
		 * preserve the leading bytes of the first word.
		 */
		ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary:
		 * preserve the trailing bytes of the last word.
		 */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_le(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Build an aligned scratch buffer merging the preserved
		 * edge words with the caller's data.
		 */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
8559
/* ethtool hook: report supported/advertised link modes and, when the
 * interface is up, the currently active speed and duplex.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	cmd->supported = (SUPPORTED_Autoneg);

	/* Gigabit modes unless this is a 10/100-only part. */
	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	/* Copper ports add 10/100 modes; SerDes ports report fibre. */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (netif_running(dev)) {
		/* Active values are only meaningful while the link is up. */
		cmd->speed = tp->link_config.active_speed;
		cmd->duplex = tp->link_config.active_duplex;
	}
	cmd->phy_address = PHY_ADDR;
	cmd->transceiver = 0;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
6aa20a22 8594
1da177e4
LT
/* ethtool hook: validate and apply a new link configuration, then
 * renegotiate via tg3_setup_phy() if the interface is running.
 * Returns 0 or -EINVAL for combinations the hardware cannot do.
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
		/* These are the only valid advertisement bits allowed.  */
		if (cmd->autoneg == AUTONEG_ENABLE &&
		    (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
					  ADVERTISED_1000baseT_Full |
					  ADVERTISED_Autoneg |
					  ADVERTISED_FIBRE)))
			return -EINVAL;
		/* Fiber can only do SPEED_1000.  */
		else if ((cmd->autoneg != AUTONEG_ENABLE) &&
			 (cmd->speed != SPEED_1000))
			return -EINVAL;
	/* Copper cannot force SPEED_1000.  */
	} else if ((cmd->autoneg != AUTONEG_ENABLE) &&
		   (cmd->speed == SPEED_1000))
		return -EINVAL;
	else if ((cmd->speed == SPEED_1000) &&
		 (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
		return -EINVAL;

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		/* Autoneg: record advertised modes, leave speed/duplex
		 * to be discovered by negotiation.
		 */
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_INVALID;
		tp->link_config.duplex = DUPLEX_INVALID;
	} else {
		/* Forced mode: pin speed/duplex, no advertisement. */
		tp->link_config.advertising = 0;
		tp->link_config.speed = cmd->speed;
		tp->link_config.duplex = cmd->duplex;
	}

	/* Remember the requested configuration so it can be restored,
	 * e.g. after power-state transitions.
	 */
	tp->link_config.orig_speed = tp->link_config.speed;
	tp->link_config.orig_duplex = tp->link_config.duplex;
	tp->link_config.orig_autoneg = tp->link_config.autoneg;

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}
6aa20a22 8644
1da177e4
LT
/* ethtool hook: report driver name/version, firmware version string
 * (filled in at probe time) and the PCI bus address.
 */
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->fw_version, tp->fw_ver);
	strcpy(info->bus_info, pci_name(tp->pdev));
}
6aa20a22 8654
1da177e4
LT
8655static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8656{
8657 struct tg3 *tp = netdev_priv(dev);
6aa20a22 8658
a85feb8c
GZ
8659 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
8660 wol->supported = WAKE_MAGIC;
8661 else
8662 wol->supported = 0;
1da177e4
LT
8663 wol->wolopts = 0;
8664 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
8665 wol->wolopts = WAKE_MAGIC;
8666 memset(&wol->sopass, 0, sizeof(wol->sopass));
8667}
6aa20a22 8668
1da177e4
LT
8669static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8670{
8671 struct tg3 *tp = netdev_priv(dev);
6aa20a22 8672
1da177e4
LT
8673 if (wol->wolopts & ~WAKE_MAGIC)
8674 return -EINVAL;
8675 if ((wol->wolopts & WAKE_MAGIC) &&
a85feb8c 8676 !(tp->tg3_flags & TG3_FLAG_WOL_CAP))
1da177e4 8677 return -EINVAL;
6aa20a22 8678
f47c11ee 8679 spin_lock_bh(&tp->lock);
1da177e4
LT
8680 if (wol->wolopts & WAKE_MAGIC)
8681 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
8682 else
8683 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
f47c11ee 8684 spin_unlock_bh(&tp->lock);
6aa20a22 8685
1da177e4
LT
8686 return 0;
8687}
6aa20a22 8688
1da177e4
LT
/* ethtool hook: return the driver's message-level bitmask. */
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}
6aa20a22 8694
1da177e4
LT
/* ethtool hook: set the driver's message-level bitmask. */
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
6aa20a22 8700
1da177e4
LT
/* ethtool hook: enable/disable TSO.  Rejects enabling on parts without
 * TSO capability; on HW_TSO_2 chips (except the 5906) also toggles the
 * IPv6 TSO feature, plus TSO-ECN on the 5761.
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
		if (value)
			return -EINVAL;
		return 0;
	}
	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
		if (value) {
			dev->features |= NETIF_F_TSO6;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
				dev->features |= NETIF_F_TSO_ECN;
		} else
			dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
	}
	/* Let the generic helper flip NETIF_F_TSO itself. */
	return ethtool_op_set_tso(dev, value);
}
6aa20a22 8721
1da177e4
LT
/* ethtool hook: restart autonegotiation.  Only valid when the device
 * is up, is not a SerDes part, and either has autoneg enabled in BMCR
 * or is in parallel-detect mode.  Returns 0 on restart, else -errno.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 bmcr;
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	spin_lock_bh(&tp->lock);
	r = -EINVAL;
	/* BMCR is read twice; the first result is discarded.
	 * NOTE(review): presumably a dummy read to settle the PHY —
	 * confirm against the PHY errata before changing.
	 */
	tg3_readphy(tp, MII_BMCR, &bmcr);
	if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
	    ((bmcr & BMCR_ANENABLE) ||
	     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
					   BMCR_ANENABLE);
		r = 0;
	}
	spin_unlock_bh(&tp->lock);

	return r;
}
6aa20a22 8748
1da177e4
LT
8749static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8750{
8751 struct tg3 *tp = netdev_priv(dev);
6aa20a22 8752
1da177e4
LT
8753 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8754 ering->rx_mini_max_pending = 0;
4f81c32b
MC
8755 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8756 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8757 else
8758 ering->rx_jumbo_max_pending = 0;
8759
8760 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
1da177e4
LT
8761
8762 ering->rx_pending = tp->rx_pending;
8763 ering->rx_mini_pending = 0;
4f81c32b
MC
8764 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8765 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8766 else
8767 ering->rx_jumbo_pending = 0;
8768
1da177e4
LT
8769 ering->tx_pending = tp->tx_pending;
8770}
6aa20a22 8771
1da177e4
LT
/* ethtool hook: change RX/TX ring sizes.  Requires a full stop, halt,
 * and hardware restart when the interface is running.  The TX ring
 * must exceed MAX_SKB_FRAGS (x3 on TSO_BUG chips) so a maximally
 * fragmented skb always fits.  Returns 0 or a negative errno.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips can only post up to 64 standard RX descriptors. */
	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
	tp->tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		/* New ring sizes only take effect across a full restart. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return err;
}
6aa20a22 8811
1da177e4
LT
8812static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8813{
8814 struct tg3 *tp = netdev_priv(dev);
6aa20a22 8815
1da177e4 8816 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8d018621
MC
8817
8818 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
8819 epause->rx_pause = 1;
8820 else
8821 epause->rx_pause = 0;
8822
8823 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
8824 epause->tx_pause = 1;
8825 else
8826 epause->tx_pause = 0;
1da177e4 8827}
6aa20a22 8828
1da177e4
LT
/* ethtool hook: update flow-control configuration, then halt and
 * restart the hardware (when running) so the new settings take
 * effect.  Returns 0 or the error from tg3_restart_hw().
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	if (netif_running(dev)) {
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	if (epause->autoneg)
		tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
	else
		tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
	if (epause->rx_pause)
		tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
	else
		tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
	if (epause->tx_pause)
		tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
	else
		tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;

	if (netif_running(dev)) {
		/* Flow-control changes require a full restart. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return err;
}
6aa20a22 8865
1da177e4
LT
/* ethtool hook: report whether RX checksum offload is enabled. */
static u32 tg3_get_rx_csum(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
}
6aa20a22 8871
1da177e4
LT
8872static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8873{
8874 struct tg3 *tp = netdev_priv(dev);
6aa20a22 8875
1da177e4
LT
8876 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8877 if (data != 0)
8878 return -EINVAL;
8879 return 0;
8880 }
6aa20a22 8881
f47c11ee 8882 spin_lock_bh(&tp->lock);
1da177e4
LT
8883 if (data)
8884 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8885 else
8886 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
f47c11ee 8887 spin_unlock_bh(&tp->lock);
6aa20a22 8888
1da177e4
LT
8889 return 0;
8890}
6aa20a22 8891
1da177e4
LT
/* ethtool hook: enable/disable TX checksum offload.  Broken-checksum
 * chips may only have it off; 5755/5787/5784/5761 also support IPv6
 * checksum offload, so use the IPv6-aware helper for those.
 */
static int tg3_set_tx_csum(struct net_device *dev, u32 data)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
		if (data != 0)
			return -EINVAL;
		return 0;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		ethtool_op_set_tx_ipv6_csum(dev, data);
	else
		ethtool_op_set_tx_csum(dev, data);

	return 0;
}
8912
b9f2c044 8913static int tg3_get_sset_count (struct net_device *dev, int sset)
1da177e4 8914{
b9f2c044
JG
8915 switch (sset) {
8916 case ETH_SS_TEST:
8917 return TG3_NUM_TEST;
8918 case ETH_SS_STATS:
8919 return TG3_NUM_STATS;
8920 default:
8921 return -EOPNOTSUPP;
8922 }
4cafd3f5
MC
8923}
8924
1da177e4
LT
8925static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8926{
8927 switch (stringset) {
8928 case ETH_SS_STATS:
8929 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8930 break;
4cafd3f5
MC
8931 case ETH_SS_TEST:
8932 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8933 break;
1da177e4
LT
8934 default:
8935 WARN_ON(1); /* we need a WARN() */
8936 break;
8937 }
8938}
8939
4009a93d
MC
/* ethtool hook: blink the port LEDs for `data` seconds (forever-ish if
 * zero) so the physical port can be identified, then restore the
 * normal LED configuration.
 */
static int tg3_phys_id(struct net_device *dev, u32 data)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	if (!netif_running(tp->dev))
		return -EAGAIN;

	if (data == 0)
		data = UINT_MAX / 2;

	/* Two half-second phases per requested second: all LEDs on,
	 * then all off.
	 */
	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
					   LED_CTRL_1000MBPS_ON |
					   LED_CTRL_100MBPS_ON |
					   LED_CTRL_10MBPS_ON |
					   LED_CTRL_TRAFFIC_OVERRIDE |
					   LED_CTRL_TRAFFIC_BLINK |
					   LED_CTRL_TRAFFIC_LED);

		else
			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
					   LED_CTRL_TRAFFIC_OVERRIDE);

		/* Stop early if the sleeping task is signalled. */
		if (msleep_interruptible(500))
			break;
	}
	tw32(MAC_LED_CTRL, tp->led_ctrl);
	return 0;
}
8971
1da177e4
LT
/* ethtool hook: copy the driver's accumulated statistics block into
 * the caller-provided u64 array.
 */
static void tg3_get_ethtool_stats (struct net_device *dev,
				   struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
8978
/* Sizes (bytes) of the NVRAM regions covered by the checksum test,
 * per NVRAM image format.
 */
#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c

/* NVRAM self-test: identify the image format from the magic word,
 * read the relevant region, and verify its checksum/parity.
 * Returns 0 on pass, -EIO on corruption, -ENOMEM on allocation
 * failure, 0 for unrecognized (but plausibly valid) selfboot formats.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic;
	__le32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return -EIO;

	/* Decide how much to read based on the image format. */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			default:
				/* Unknown revision: don't fail the test. */
				return 0;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = swab32(le32_to_cpu(buf[0]));
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		/* Byte-sum of the whole image must be zero. */
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes. */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			else if (i == 16) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		/* Each data byte must have odd parity: fail if the stored
		 * parity bit disagrees with the computed bit count.
		 */
		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if(csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	err = 0;

out:
	kfree(buf);
	return err;
}
9119
ca43007a
MC
/* Seconds to wait for link-up in the ethtool link self-test. */
#define TG3_SERDES_TIMEOUT_SEC	2
#define TG3_COPPER_TIMEOUT_SEC	6

/* Link self-test: poll once a second for carrier, up to the
 * media-specific timeout.  Returns 0 when the link is up, -ENODEV if
 * the interface is down, -EIO if the link never came up.
 */
static int tg3_test_link(struct tg3 *tp)
{
	int i, max;

	if (!netif_running(tp->dev))
		return -ENODEV;

	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		max = TG3_SERDES_TIMEOUT_SEC;
	else
		max = TG3_COPPER_TIMEOUT_SEC;

	for (i = 0; i < max; i++) {
		if (netif_carrier_ok(tp->dev))
			return 0;

		/* Abort early if interrupted by a signal. */
		if (msleep_interruptible(1000))
			break;
	}

	return -EIO;
}
9145
/* Only test the commonly used registers */
/* Register self-test: for each table entry applicable to this chip,
 * write all-zeros and then all-ones patterns, verifying that read-only
 * bits are preserved and read/write bits take the written value.  The
 * original register value is restored afterwards.  Returns 0 on pass,
 * -EIO (with the failing offset logged) on the first mismatch.
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
/* Per-entry applicability flags: which chip families the entry
 * applies to (or must be skipped on).
 */
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		/* Sentinel terminating the table. */
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		is_5705 = 1;
		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries not applicable to this chip family. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		printk(KERN_ERR PFX "Register test failed at offset %x\n",
		       offset);
	tw32(offset, save_val);
	return -EIO;
}
9366
7942e1db
MC
9367static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9368{
f71e1309 9369 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
7942e1db
MC
9370 int i;
9371 u32 j;
9372
e9edda69 9373 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
7942e1db
MC
9374 for (j = 0; j < len; j += 4) {
9375 u32 val;
9376
9377 tg3_write_mem(tp, offset + j, test_pattern[i]);
9378 tg3_read_mem(tp, offset + j, &val);
9379 if (val != test_pattern[i])
9380 return -EIO;
9381 }
9382 }
9383 return 0;
9384}
9385
9386static int tg3_test_memory(struct tg3 *tp)
9387{
9388 static struct mem_entry {
9389 u32 offset;
9390 u32 len;
9391 } mem_tbl_570x[] = {
38690194 9392 { 0x00000000, 0x00b50},
7942e1db
MC
9393 { 0x00002000, 0x1c000},
9394 { 0xffffffff, 0x00000}
9395 }, mem_tbl_5705[] = {
9396 { 0x00000100, 0x0000c},
9397 { 0x00000200, 0x00008},
7942e1db
MC
9398 { 0x00004000, 0x00800},
9399 { 0x00006000, 0x01000},
9400 { 0x00008000, 0x02000},
9401 { 0x00010000, 0x0e000},
9402 { 0xffffffff, 0x00000}
79f4d13a
MC
9403 }, mem_tbl_5755[] = {
9404 { 0x00000200, 0x00008},
9405 { 0x00004000, 0x00800},
9406 { 0x00006000, 0x00800},
9407 { 0x00008000, 0x02000},
9408 { 0x00010000, 0x0c000},
9409 { 0xffffffff, 0x00000}
b16250e3
MC
9410 }, mem_tbl_5906[] = {
9411 { 0x00000200, 0x00008},
9412 { 0x00004000, 0x00400},
9413 { 0x00006000, 0x00400},
9414 { 0x00008000, 0x01000},
9415 { 0x00010000, 0x01000},
9416 { 0xffffffff, 0x00000}
7942e1db
MC
9417 };
9418 struct mem_entry *mem_tbl;
9419 int err = 0;
9420 int i;
9421
79f4d13a 9422 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
af36e6b6 9423 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
d30cdd28 9424 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9936bcf6
MC
9425 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9426 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
79f4d13a 9427 mem_tbl = mem_tbl_5755;
b16250e3
MC
9428 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9429 mem_tbl = mem_tbl_5906;
79f4d13a
MC
9430 else
9431 mem_tbl = mem_tbl_5705;
9432 } else
7942e1db
MC
9433 mem_tbl = mem_tbl_570x;
9434
9435 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9436 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
9437 mem_tbl[i].len)) != 0)
9438 break;
9439 }
6aa20a22 9440
7942e1db
MC
9441 return err;
9442}
9443
9f40dead
MC
9444#define TG3_MAC_LOOPBACK 0
9445#define TG3_PHY_LOOPBACK 1
9446
/* Run a single-packet loopback self test in either MAC-internal or PHY
 * loopback mode (TG3_MAC_LOOPBACK / TG3_PHY_LOOPBACK).  Builds one 1514-byte
 * frame, transmits it through the looped-back path, and verifies that the
 * received frame's payload matches what was sent.
 *
 * Returns 0 on success, -EINVAL for an unknown mode, -ENOMEM if the skb
 * cannot be allocated, and -EIO for any tx/rx/compare failure.
 * Caller is expected to hold the device quiesced; on success the rx skb is
 * left in the ring (tg3_free_rings() will unmap and free it later).
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 desc_idx;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
			return 0;

		/* Loop the MAC back onto itself, forcing MII or GMII port
		 * mode depending on the device's speed capability.
		 */
		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_PORT_INT_LPBACK;
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			mac_mode |= MAC_MODE_LINK_POLARITY;
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
		u32 val;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			u32 phytest;

			/* 5906: tweak a shadow register (0x1b) via the EPHY
			 * test register before enabling loopback; the shadow
			 * access window is restored afterwards.
			 */
			if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
				u32 phy;

				tg3_writephy(tp, MII_TG3_EPHY_TEST,
					     phytest | MII_TG3_EPHY_SHADOW_EN);
				if (!tg3_readphy(tp, 0x1b, &phy))
					tg3_writephy(tp, 0x1b, phy & ~0x20);
				tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
			}
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
		} else
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

		tg3_phy_toggle_automdix(tp, 0);

		tg3_writephy(tp, MII_BMCR, val);
		udelay(40);

		mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		} else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
			/* 5700 with BCM5401/5411 PHYs needs link-polarity
			 * adjusted and the LNK3 LED mode selected.
			 */
			if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
				mac_mode &= ~MAC_MODE_LINK_POLARITY;
			else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
				mac_mode |= MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);
	}
	else
		return -EINVAL;

	err = -EIO;

	/* Build the test frame: dest MAC, 8 zero bytes, then an
	 * incrementing byte pattern the receive side is checked against.
	 */
	tx_len = 1514;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + 4);

	for (i = 14; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

	/* Force a host-coalescing cycle so the status block indices below
	 * reflect the current hardware state.
	 */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       HOSTCC_MODE_NOW);

	udelay(10);

	rx_start_idx = tp->hw_status->idx[0].rx_producer;

	num_pkts = 0;

	tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

	tp->tx_prod++;
	num_pkts++;

	/* Kick the transmit mailbox; the read-back flushes the write. */
	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
		     tp->tx_prod);
	tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

	udelay(10);

	/* 250 usec to allow enough time on some 10/100 Mbps devices. */
	for (i = 0; i < 25; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       HOSTCC_MODE_NOW);

		udelay(10);

		tx_idx = tp->hw_status->idx[0].tx_consumer;
		rx_idx = tp->hw_status->idx[0].rx_producer;
		if ((tx_idx == tp->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The packet must have been consumed on tx and produced on rx. */
	if (tx_idx != tp->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	/* Validate the rx descriptor: must come from the standard ring,
	 * carry no (fatal) error bits, and match the transmitted length
	 * (the +4 accounts for the FCS stripped here).
	 */
	desc = &tp->rx_rcb[rx_start_idx];
	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
	if (opaque_key != RXD_OPAQUE_RING_STD)
		goto out;

	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
		goto out;

	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
	if (rx_len != tx_len)
		goto out;

	rx_skb = tp->rx_std_buffers[desc_idx].skb;

	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

	/* Compare the received payload against the sent pattern. */
	for (i = 14; i < tx_len; i++) {
		if (*(rx_skb->data + i) != (u8) (i & 0xff))
			goto out;
	}
	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
9614
9f40dead
MC
9615#define TG3_MAC_LOOPBACK_FAILED 1
9616#define TG3_PHY_LOOPBACK_FAILED 2
9617#define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
9618 TG3_PHY_LOOPBACK_FAILED)
9619
/* Run the MAC and (for copper devices) PHY loopback self tests.
 *
 * Returns a bitmask of TG3_MAC_LOOPBACK_FAILED / TG3_PHY_LOOPBACK_FAILED,
 * or 0 if all applicable loopback tests pass.  Requires the interface to
 * be running; the hardware is reset before testing.
 */
static int tg3_test_loopback(struct tg3 *tp)
{
	int err = 0;
	u32 cpmuctrl = 0;

	if (!netif_running(tp->dev))
		return TG3_LOOPBACK_FAILED;

	err = tg3_reset_hw(tp, 1);
	if (err)
		return TG3_LOOPBACK_FAILED;

	/* 5784/5761: grab the CPMU hardware mutex and disable link-based
	 * power management for the duration of the MAC loopback test.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		int i;
		u32 status;

		tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);

		/* Wait for up to 40 microseconds to acquire lock. */
		for (i = 0; i < 4; i++) {
			status = tr32(TG3_CPMU_MUTEX_GNT);
			if (status == CPMU_MUTEX_GNT_DRIVER)
				break;
			udelay(10);
		}

		if (status != CPMU_MUTEX_GNT_DRIVER)
			return TG3_LOOPBACK_FAILED;

		/* Turn off link-based power management. */
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		tw32(TG3_CPMU_CTRL,
		     cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
				  CPMU_CTRL_LINK_AWARE_MODE));
	}

	if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
		err |= TG3_MAC_LOOPBACK_FAILED;

	/* Restore CPMU state and release the mutex taken above. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		tw32(TG3_CPMU_CTRL, cpmuctrl);

		/* Release the mutex */
		tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
	}

	/* PHY loopback is only meaningful for non-SERDES (copper) PHYs. */
	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
		if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
			err |= TG3_PHY_LOOPBACK_FAILED;
	}

	return err;
}
9675
4cafd3f5
MC
/* ethtool self-test entry point.
 *
 * Per-slot results in data[]: 0=nvram, 1=link, 2=registers, 3=memory,
 * 4=loopback, 5=interrupt.  Non-zero marks a failure and sets
 * ETH_TEST_FL_FAILED in etest->flags.  The offline tests (slots 2-5)
 * halt the chip under tg3_full_lock() and restart it afterwards.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Bring the device to full power for the duration of the test. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Quiesce the chip: halt it, stop the on-chip CPUs (the
		 * TX CPU only exists on pre-5705 chips), holding the NVRAM
		 * lock across the CPU halts if we could get it.
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		/* data[4] carries the loopback failure bitmask directly. */
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Interrupt test must run without tg3_full_lock held. */
		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Shut down, then bring the interface back up if it was
		 * running when we started.
		 */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			if (!tg3_restart_hw(tp, 1))
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}
	/* Return to low-power state if that is where we started. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);

}
9748
1da177e4
LT
/* Private ioctl handler: supports the standard MII ioctls
 * (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG) for copper PHYs only.
 * Returns -EOPNOTSUPP for anything else or for SERDES devices,
 * -EAGAIN while the PHY is in low-power state, and -EPERM if the
 * caller lacks CAP_NET_ADMIN for a register write.
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = PHY_ADDR;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;

		/* tp->lock serializes MDIO accesses to the PHY. */
		spin_lock_bh(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
9800
9801#if TG3_VLAN_TAG_USED
/* VLAN acceleration callback: record the new vlan_group and refresh the
 * RX mode so the chip's VLAN-tag stripping matches.  The interface is
 * quiesced around the update when it is running.
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev))
		tg3_netif_stop(tp);

	tg3_full_lock(tp, 0);

	tp->vlgrp = grp;

	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
	__tg3_set_rx_mode(dev);

	if (netif_running(dev))
		tg3_netif_start(tp);

	tg3_full_unlock(tp);
}
1da177e4
LT
9821#endif
9822
15f9850d
DM
9823static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9824{
9825 struct tg3 *tp = netdev_priv(dev);
9826
9827 memcpy(ec, &tp->coal, sizeof(*ec));
9828 return 0;
9829}
9830
d244c892
MC
9831static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9832{
9833 struct tg3 *tp = netdev_priv(dev);
9834 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
9835 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
9836
9837 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
9838 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
9839 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
9840 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
9841 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
9842 }
9843
9844 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
9845 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
9846 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
9847 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
9848 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
9849 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
9850 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
9851 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
9852 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
9853 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
9854 return -EINVAL;
9855
9856 /* No rx interrupts will be generated if both are zero */
9857 if ((ec->rx_coalesce_usecs == 0) &&
9858 (ec->rx_max_coalesced_frames == 0))
9859 return -EINVAL;
9860
9861 /* No tx interrupts will be generated if both are zero */
9862 if ((ec->tx_coalesce_usecs == 0) &&
9863 (ec->tx_max_coalesced_frames == 0))
9864 return -EINVAL;
9865
9866 /* Only copy relevant parameters, ignore all others. */
9867 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
9868 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
9869 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
9870 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
9871 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
9872 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
9873 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
9874 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
9875 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
9876
9877 if (netif_running(dev)) {
9878 tg3_full_lock(tp, 0);
9879 __tg3_set_coalesce(tp, &tp->coal);
9880 tg3_full_unlock(tp);
9881 }
9882 return 0;
9883}
9884
/* ethtool operations table for the tg3 driver; the callbacks are the
 * tg3_* handlers defined elsewhere in this file.
 */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.get_rx_csum		= tg3_get_rx_csum,
	.set_rx_csum		= tg3_set_rx_csum,
	.set_tx_csum		= tg3_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= tg3_set_tso,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.phys_id		= tg3_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
};
9917
/* Probe the size of a serial EEPROM by looking for the address wrap.
 * Starts from the default EEPROM_CHIP_SIZE and leaves tp->nvram_size at
 * the detected size; returns early (keeping the default) if the magic
 * signature at offset 0 is unrecognized or a read fails.
 */
static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return;

	/* Only proceed for known magic values (plain, FW, or HW selfboot). */
	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}
6aa20a22 9951
1da177e4
LT
9952static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9953{
9954 u32 val;
9955
1820180b 9956 if (tg3_nvram_read_swab(tp, 0, &val) != 0)
1b27777a
MC
9957 return;
9958
9959 /* Selfboot format */
1820180b 9960 if (val != TG3_EEPROM_MAGIC) {
1b27777a
MC
9961 tg3_get_eeprom_size(tp);
9962 return;
9963 }
9964
1da177e4
LT
9965 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9966 if (val != 0) {
9967 tp->nvram_size = (val >> 16) * 1024;
9968 return;
9969 }
9970 }
fd1122a2 9971 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
1da177e4
LT
9972}
9973
/* Decode NVRAM_CFG1 for 5750/5780-class chips (and defaults for the
 * rest): set the flash flag, vendor JEDEC id, page size, and the
 * buffered-NVRAM flag according to the strapped vendor bits.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
	}
	else {
		/* No flash interface: disable compatibility bypass. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	}
	else {
		/* Other chips default to a buffered Atmel part. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
	}
}
10026
361b4ac2
MC
/* Decode NVRAM_CFG1 on 5752: detect TPM write-protection, the vendor
 * part (Atmel EEPROM/flash or ST M45PEx0), and the flash page size.
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		break;
	}

	if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
		switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
		case FLASH_5752PAGE_SIZE_256:
			tp->nvram_pagesize = 256;
			break;
		case FLASH_5752PAGE_SIZE_512:
			tp->nvram_pagesize = 512;
			break;
		case FLASH_5752PAGE_SIZE_1K:
			tp->nvram_pagesize = 1024;
			break;
		case FLASH_5752PAGE_SIZE_2K:
			tp->nvram_pagesize = 2048;
			break;
		case FLASH_5752PAGE_SIZE_4K:
			tp->nvram_pagesize = 4096;
			break;
		case FLASH_5752PAGE_SIZE_264:
			tp->nvram_pagesize = 264;
			break;
		}
	}
	else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
10087
d3c7b886
MC
/* Decode NVRAM_CFG1 on 5755: detect TPM protection and the flash part,
 * and derive tp->nvram_size directly from the vendor code (protected
 * parts report a reduced usable size).
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
10143
1b27777a
MC
/* Decode NVRAM_CFG1 on 5787-class chips: select between serial EEPROM
 * (Atmel/Micro), buffered Atmel flash, and ST M45PEx0 flash, setting
 * vendor id, buffering flag, flash flag, and page size accordingly.
 */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* EEPROM access: disable compatibility bypass. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 256;
		break;
	}
}
10181
6b91fa02
MC
/* Decode NVRAM_CFG1 on 5761: detect TPM protection and the flash part
 * (Atmel ADB/MDB or ST M45PEx0).  For protected parts the size comes
 * from the NVRAM_ADDR_LOCKOUT register; otherwise it is inferred from
 * the vendor code.
 */
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		/* Atmel parts here use direct addressing. */
		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
10256
b5d3772c
MC
10257static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10258{
10259 tp->nvram_jedecnum = JEDEC_ATMEL;
10260 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10261 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10262}
10263
1da177e4
LT
10264/* Chips other than 5700/5701 use the NVRAM for fetching info. */
10265static void __devinit tg3_nvram_init(struct tg3 *tp)
10266{
1da177e4
LT
10267 tw32_f(GRC_EEPROM_ADDR,
10268 (EEPROM_ADDR_FSM_RESET |
10269 (EEPROM_DEFAULT_CLOCK_PERIOD <<
10270 EEPROM_ADDR_CLKPERD_SHIFT)));
10271
9d57f01c 10272 msleep(1);
1da177e4
LT
10273
10274 /* Enable seeprom accesses. */
10275 tw32_f(GRC_LOCAL_CTRL,
10276 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10277 udelay(100);
10278
10279 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10280 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10281 tp->tg3_flags |= TG3_FLAG_NVRAM;
10282
ec41c7df
MC
10283 if (tg3_nvram_lock(tp)) {
10284 printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
10285 "tg3_nvram_init failed.\n", tp->dev->name);
10286 return;
10287 }
e6af301b 10288 tg3_enable_nvram_access(tp);
1da177e4 10289
989a9d23
MC
10290 tp->nvram_size = 0;
10291
361b4ac2
MC
10292 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10293 tg3_get_5752_nvram_info(tp);
d3c7b886
MC
10294 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10295 tg3_get_5755_nvram_info(tp);
d30cdd28
MC
10296 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10297 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
1b27777a 10298 tg3_get_5787_nvram_info(tp);
6b91fa02
MC
10299 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10300 tg3_get_5761_nvram_info(tp);
b5d3772c
MC
10301 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10302 tg3_get_5906_nvram_info(tp);
361b4ac2
MC
10303 else
10304 tg3_get_nvram_info(tp);
10305
989a9d23
MC
10306 if (tp->nvram_size == 0)
10307 tg3_get_nvram_size(tp);
1da177e4 10308
e6af301b 10309 tg3_disable_nvram_access(tp);
381291b7 10310 tg3_nvram_unlock(tp);
1da177e4
LT
10311
10312 } else {
10313 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10314
10315 tg3_get_eeprom_size(tp);
10316 }
10317}
10318
/* Read one dword from the legacy serial EEPROM via the GRC_EEPROM_ADDR
 * state machine.  @offset must be dword-aligned and fit in the address
 * field.  Returns 0 and stores the word in *val, -EINVAL on a bad
 * offset, or -EBUSY if the EEPROM never signals completion.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK ||
	    (offset % 4) != 0)
		return -EINVAL;

	/* Preserve unrelated bits; clear address, device ID and R/W. */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll for completion, up to ~1 second. */
	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	*val = tr32(GRC_EEPROM_DATA);
	return 0;
}
10352
10353#define NVRAM_CMD_TIMEOUT 10000
10354
10355static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10356{
10357 int i;
10358
10359 tw32(NVRAM_CMD, nvram_cmd);
10360 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10361 udelay(10);
10362 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10363 udelay(10);
10364 break;
10365 }
10366 }
10367 if (i == NVRAM_CMD_TIMEOUT) {
10368 return -EBUSY;
10369 }
10370 return 0;
10371}
10372
1820180b
MC
10373static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10374{
10375 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10376 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10377 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
6b91fa02 10378 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
1820180b
MC
10379 (tp->nvram_jedecnum == JEDEC_ATMEL))
10380
10381 addr = ((addr / tp->nvram_pagesize) <<
10382 ATMEL_AT45DB0X1B_PAGE_POS) +
10383 (addr % tp->nvram_pagesize);
10384
10385 return addr;
10386}
10387
c4e6575c
MC
10388static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10389{
10390 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10391 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10392 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
6b91fa02 10393 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
c4e6575c
MC
10394 (tp->nvram_jedecnum == JEDEC_ATMEL))
10395
10396 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10397 tp->nvram_pagesize) +
10398 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10399
10400 return addr;
10401}
10402
1da177e4
LT
/* Read one 32-bit word from NVRAM at logical @offset into *val.
 * Falls back to the serial-EEPROM path when the chip has no NVRAM
 * interface.  Acquires the NVRAM hardware lock for the duration of
 * the access.  Returns 0 on success or a negative errno.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	/* Translate to the flash part's physical addressing. */
	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	/* *val is only written on a successful read. */
	if (ret == 0)
		*val = swab32(tr32(NVRAM_RDDATA));

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
10434
b9fc7dc5
AV
10435static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
10436{
10437 u32 v;
10438 int res = tg3_nvram_read(tp, offset, &v);
10439 if (!res)
10440 *val = cpu_to_le32(v);
10441 return res;
10442}
10443
1820180b
MC
10444static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10445{
10446 int err;
10447 u32 tmp;
10448
10449 err = tg3_nvram_read(tp, offset, &tmp);
10450 *val = swab32(tmp);
10451 return err;
10452}
10453
1da177e4
LT
/* Write @len bytes from @buf to the legacy serial EEPROM starting at
 * @offset, one dword per state-machine transaction.  @offset and @len
 * are dword aligned (caller's contract).  Returns 0 on success or
 * -EBUSY if a write never completes.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
		u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__le32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		tw32(GRC_EEPROM_DATA, le32_to_cpu(data));

		/* Ack any stale COMPLETE status before starting. */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll for completion, up to ~1 second per word. */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
10496
/* offset and length are dword aligned */
/* Write to unbuffered flash using a read-modify-write of whole pages:
 * for each page touched, read it into a scratch buffer, merge the new
 * data, erase the page, then stream the buffer back word by word.
 * Caller must already hold the NVRAM lock with access enabled.
 * Returns 0 on success or the first error encountered; a final WRDI
 * (write-disable) command is issued regardless of outcome.
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		/* Start of the page containing the current offset. */
		phy_addr = offset & ~pagemask;

		/* Read the whole page into the scratch buffer. */
		for (j = 0; j < pagesize; j += 4) {
			if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
						(__le32 *) (tmp + j))))
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		/* Merge caller data into the page image. */
		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Stream the merged page back, word by word. */
		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));
			/* swab32(le32_to_cpu(data)), actually */
			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	/* Always leave the part write-disabled. */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
10593
/* offset and length are dword aligned */
/* Write to buffered flash / EEPROM one dword at a time, tagging each
 * command with FIRST/LAST at page boundaries as the part requires.
 * Caller must already hold the NVRAM lock with access enabled.
 * Returns 0 on success or the first command error.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* FIRST at the start of a page or of the whole block;
		 * LAST at the end of a page or of the whole block.
		 */
		if ((page_off == 0) || (i == 0))
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* ST parts on older ASICs need an explicit write-enable
		 * before each FIRST command.
		 */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
10647
10648/* offset and length are dword aligned */
10649static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
10650{
10651 int ret;
10652
1da177e4 10653 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
314fba34
MC
10654 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
10655 ~GRC_LCLCTRL_GPIO_OUTPUT1);
1da177e4
LT
10656 udelay(40);
10657 }
10658
10659 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
10660 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
10661 }
10662 else {
10663 u32 grc_mode;
10664
ec41c7df
MC
10665 ret = tg3_nvram_lock(tp);
10666 if (ret)
10667 return ret;
1da177e4 10668
e6af301b
MC
10669 tg3_enable_nvram_access(tp);
10670 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
10671 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
1da177e4 10672 tw32(NVRAM_WRITE1, 0x406);
1da177e4
LT
10673
10674 grc_mode = tr32(GRC_MODE);
10675 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
10676
10677 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
10678 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10679
10680 ret = tg3_nvram_write_block_buffered(tp, offset, len,
10681 buf);
10682 }
10683 else {
10684 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
10685 buf);
10686 }
10687
10688 grc_mode = tr32(GRC_MODE);
10689 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
10690
e6af301b 10691 tg3_disable_nvram_access(tp);
1da177e4
LT
10692 tg3_nvram_unlock(tp);
10693 }
10694
10695 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
314fba34 10696 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1da177e4
LT
10697 udelay(40);
10698 }
10699
10700 return ret;
10701}
10702
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;	/* PCI subsystem IDs */
	u32 phy_id;	/* PHY fitted on that board; 0 => SERDES board */
};

/* Fallback mapping from PCI subsystem IDs to the board's PHY, used by
 * tg3_phy_probe() when neither the chip nor the EEPROM yields a valid
 * PHY ID.
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },		    /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },		    /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },		/* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },		  /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
10745
10746static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
10747{
10748 int i;
10749
10750 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
10751 if ((subsys_id_to_phy_id[i].subsys_vendor ==
10752 tp->pdev->subsystem_vendor) &&
10753 (subsys_id_to_phy_id[i].subsys_devid ==
10754 tp->pdev->subsystem_device))
10755 return &subsys_id_to_phy_id[i];
10756 }
10757 return NULL;
10758}
10759
/* Parse the hardware configuration that bootcode left in NIC SRAM
 * (or the 5906's VCPU shadow registers) and translate it into
 * tp->tg3_flags*, PHY ID, LED mode and WOL capability defaults.
 * Must run with the chip in D0 and the memory arbiter enabled.
 */
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;
	u16 pmcsr;

	/* On some early chips the SRAM cannot be accessed in D3hot state,
	 * so need make sure we're in D0.
	 */
	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
	msleep(1);

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->phy_id = PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;

	/* 5906 keeps its config in VCPU shadow registers, not SRAM. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT))
			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
		return;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		/* CFG_2 only exists for bootcode versions in (0, 0x100)
		 * on chips newer than 5700/5701/5703.
		 */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Reassemble the PHY ID from the two SRAM fields. */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
				tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
			else
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}

		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		};

		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
			/* Specific Arima boards misreport write protect. */
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
		} else {
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
		if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE)
			tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;

		if (tp->tg3_flags & TG3_FLAG_WOL_CAP &&
		    nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)
			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;

		if (cfg2 & (1 << 17))
			tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;

		if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
		}
	}
}
10950
b2a5c19c
MC
10951static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
10952{
10953 int i;
10954 u32 val;
10955
10956 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
10957 tw32(OTP_CTRL, cmd);
10958
10959 /* Wait for up to 1 ms for command to execute. */
10960 for (i = 0; i < 100; i++) {
10961 val = tr32(OTP_STATUS);
10962 if (val & OTP_STATUS_CMD_DONE)
10963 break;
10964 udelay(10);
10965 }
10966
10967 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
10968}
10969
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 * Returns 0 if any OTP command fails (0 is the "no config" sentinel).
 */
static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	/* Route OTP access through the GRC register window. */
	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	/* Merge the low half of the first word with the high half of
	 * the second to reconstruct the straddling 32-bit value.
	 */
	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
10999
7d0c41ef
MC
/* Determine which PHY is attached (from the chip's MII registers, the
 * EEPROM, or the hard-coded subsystem-ID table, in that order of
 * preference), reset and configure copper PHYs for autonegotiation,
 * and set the default advertised link modes.  Returns 0 or -errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	    (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;

		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
	}

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == PHY_ID_BCM8002)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		else
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
	} else {
		if (tp->phy_id != PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			/* A table phy_id of 0 marks a SERDES board. */
			if (!tp->phy_id ||
			    tp->phy_id == PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}

	/* Copper PHY, not firmware-managed: reset and configure it. */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl, mask;

		/* BMSR latches link-down; read twice for current state. */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			/* Early 5701 steppings must be forced master. */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		if (!tg3_copper_is_advertising_all(tp, mask)) {
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;
	}

	/* Retry the 5401 DSP init once if the first pass succeeded. */
	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);
	}

	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}
11127
/* Extract the board part number from the VPD area (read either through
 * NVRAM or the PCI VPD capability) into tp->board_part_number.  The
 * parser walks the VPD resource list looking for the read-only
 * descriptor (0x90) and its "PN" keyword.  Falls back to "BCM95906"
 * (on 5906) or "none" if anything is missing or malformed.
 */
static void __devinit tg3_read_partno(struct tg3 *tp)
{
	unsigned char vpd_data[256];
	unsigned int i;
	u32 magic;

	if (tg3_nvram_read_swab(tp, 0x0, &magic))
		goto out_not_found;

	if (magic == TG3_EEPROM_MAGIC) {
		/* VPD lives at NVRAM offset 0x100; copy 256 bytes. */
		for (i = 0; i < 256; i += 4) {
			u32 tmp;

			if (tg3_nvram_read(tp, 0x100 + i, &tmp))
				goto out_not_found;

			vpd_data[i + 0] = ((tmp >>  0) & 0xff);
			vpd_data[i + 1] = ((tmp >>  8) & 0xff);
			vpd_data[i + 2] = ((tmp >> 16) & 0xff);
			vpd_data[i + 3] = ((tmp >> 24) & 0xff);
		}
	} else {
		/* No NVRAM magic: read VPD via the PCI capability. */
		int vpd_cap;

		vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
		for (i = 0; i < 256; i += 4) {
			u32 tmp, j = 0;
			__le32 v;
			u16 tmp16;

			pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
					      i);
			/* Bit 15 of VPD_ADDR signals data availability. */
			while (j++ < 100) {
				pci_read_config_word(tp->pdev, vpd_cap +
						     PCI_VPD_ADDR, &tmp16);
				if (tmp16 & 0x8000)
					break;
				msleep(1);
			}
			if (!(tmp16 & 0x8000))
				goto out_not_found;

			pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
					      &tmp);
			v = cpu_to_le32(tmp);
			memcpy(&vpd_data[i], &v, 4);
		}
	}

	/* Now parse and find the part number. */
	for (i = 0; i < 254; ) {
		unsigned char val = vpd_data[i];
		unsigned int block_end;

		/* Skip identifier-string (0x82) / read-write (0x91)
		 * descriptors; their 16-bit length follows the tag.
		 */
		if (val == 0x82 || val == 0x91) {
			i = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
			continue;
		}

		if (val != 0x90)
			goto out_not_found;

		block_end = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
		i += 3;

		if (block_end > 256)
			goto out_not_found;

		/* Scan keyword entries: 2-char key, 1-byte length. */
		while (i < (block_end - 2)) {
			if (vpd_data[i + 0] == 'P' &&
			    vpd_data[i + 1] == 'N') {
				int partno_len = vpd_data[i + 2];

				i += 3;
				if (partno_len > 24 || (partno_len + i) > 256)
					goto out_not_found;

				memcpy(tp->board_part_number,
				       &vpd_data[i], partno_len);

				/* Success. */
				return;
			}
			i += 3 + vpd_data[i + 2];
		}

		/* Part number not found. */
		goto out_not_found;
	}

out_not_found:
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		strcpy(tp->board_part_number, "BCM95906");
	else
		strcpy(tp->board_part_number, "none");
}
11228
9c8a620e
MC
11229static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11230{
11231 u32 val;
11232
11233 if (tg3_nvram_read_swab(tp, offset, &val) ||
11234 (val & 0xfc000000) != 0x0c000000 ||
11235 tg3_nvram_read_swab(tp, offset + 4, &val) ||
11236 val != 0)
11237 return 0;
11238
11239 return 1;
11240}
11241
c4e6575c
MC
/* Build the firmware version string in tp->fw_ver from NVRAM: the
 * bootcode version first, then (when ASF is enabled without APE) the
 * ASF initialization firmware version appended after ", ".  Bails out
 * silently on any read error or invalid image, leaving tp->fw_ver as-is.
 */
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	u32 ver_offset;
	int i, bcnt;

	if (tg3_nvram_read_swab(tp, 0, &val))
		return;

	if (val != TG3_EEPROM_MAGIC)
		return;

	/* 0xc: bootcode image offset; 0x4: image load address. */
	if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
	    tg3_nvram_read_swab(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (!tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
		return;

	/* Copy the 16-byte bootcode version string. */
	offset = offset + ver_offset - start;
	for (i = 0; i < 16; i += 4) {
		__le32 v;
		if (tg3_nvram_read_le(tp, offset + i, &v))
			return;

		memcpy(tp->fw_ver + i, &v, 4);
	}

	/* ASF firmware version only applies with ASF but not APE. */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	     (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	/* Walk the NVRAM directory for the ASF-init entry. */
	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read_swab(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read_swab(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read_swab(tp, offset + 8, &val))
		return;

	offset += val - start;

	bcnt = strlen(tp->fw_ver);

	tp->fw_ver[bcnt++] = ',';
	tp->fw_ver[bcnt++] = ' ';

	/* Append up to 16 bytes, clamped to the buffer size. */
	for (i = 0; i < 4; i++) {
		__le32 v;
		if (tg3_nvram_read_le(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (bcnt > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
			break;
		}

		memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
		bcnt += sizeof(v);
	}

	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
11325
7544b097
MC
11326static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11327
1da177e4
LT
/* tg3_get_invariants() - one-time probe of chip revision and bus topology.
 *
 * Reads the chip revision from PCI config space, detects every chipset /
 * host-bridge quirk this driver works around, records the results in
 * tp->tg3_flags / tg3_flags2 / tg3_flags3, selects the register and mailbox
 * accessor functions, forces the chip into D0, initializes clocking and
 * byte-swap modes, and probes the PHY.
 *
 * Returns 0 on success (a tg3_phy_probe() failure is reported but
 * deliberately not fatal — see below), or a negative errno on a fatal
 * probe failure (missing PCI-X capability, failed D0 transition).
 */
static int __devinit tg3_get_invariants(struct tg3 *tp)
{
	/* Host bridges whose posted-write reordering breaks our mailbox
	 * writes; detected below to set TG3_FLAG_MBOX_WRITE_REORDER.
	 */
	static struct pci_device_id write_reorder_chipsets[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
			     PCI_DEVICE_ID_AMD_FE_GATE_700C) },
		{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
			     PCI_DEVICE_ID_AMD_8131_BRIDGE) },
		{ PCI_DEVICE(PCI_VENDOR_ID_VIA,
			     PCI_DEVICE_ID_VIA_8385_0) },
		{ },
	};
	u32 misc_ctrl_reg;
	u32 cacheline_sz_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err, pcie_cap;

	/* Force memory write invalidate off. If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size. The Broadcom driver have this
	 * workaround but turns MWI off all the times so never uses
	 * it. This seems to suggest that the workaround is insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
	 * has the register indirect write enable bit set before
	 * we try to access any of the MMIO registers. It is also
	 * critical that the PCI-X hw workaround situation is decided
	 * before that as well.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			      &misc_ctrl_reg);

	tp->pci_chip_rev_id = (misc_ctrl_reg >>
			       MISC_HOST_CTRL_CHIPREV_SHIFT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
		/* Newer parts report the real revision in a dedicated
		 * product-ID register instead of MISC_HOST_CTRL.
		 */
		u32 prod_id_asic_rev;

		pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
				      &prod_id_asic_rev);
		tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
	 * we need to disable memory and use config. cycles
	 * only to access all registers. The 5702/03 chips
	 * can mistakenly decode the special cycles from the
	 * ICH chipsets as memory write cycles, causing corruption
	 * of register and memory space. Only certain ICH bridges
	 * will drive special cycles with non-zero data during the
	 * address phase which can fall within the 5703's address
	 * range. This is not an ICH bug as the PCI spec allows
	 * non-zero address during special cycles. However, only
	 * these ICH bridges are known to drive non-zero addresses
	 * during special cycles.
	 *
	 * Since special cycles do not cross PCI bridges, we only
	 * enable this workaround if the 5703 is on the secondary
	 * bus of these ICH bridges.
	 */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
	    (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
		static struct tg3_dev_id {
			u32 vendor;
			u32 device;
			u32 rev;
		} ich_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
			  0xa },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
			  PCI_ANY_ID },
			{ },
		};
		struct tg3_dev_id *pci_id = &ich_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor, pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (pci_id->rev != PCI_ANY_ID) {
				if (bridge->revision > pci_id->rev)
					continue;
			}
			/* Only apply the workaround when the NIC sits on
			 * the bridge's secondary bus.
			 */
			if (bridge->subordinate &&
			    (bridge->subordinate->number ==
			     tp->pdev->bus->number)) {

				tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
				pci_dev_put(bridge);
				break;
			}
		}
	}

	/* 5701 behind an Intel PXH bridge needs the DMA workaround. */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		static struct tg3_dev_id {
			u32 vendor;
			u32 device;
		} bridge_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
			{ },
		};
		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor,
						pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->subordinate >=
			     tp->pdev->bus->number)) {
				tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
				pci_dev_put(bridge);
				break;
			}
		}
	}

	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
	 * DMA addresses > 40-bit. This bridge may have other additional
	 * 57xx devices behind it in some 4-port NIC designs for example.
	 * Any tg3 device found behind the bridge will also need the 40-bit
	 * DMA workaround.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
		tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
		tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
	}
	else {
		/* Not a 5780-class part: still need the 40-bit workaround
		 * if we sit behind a ServerWorks EPB bridge.
		 */
		struct pci_dev *bridge = NULL;

		do {
			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
						PCI_DEVICE_ID_SERVERWORKS_EPB,
						bridge);
			if (bridge && bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->subordinate >=
			     tp->pdev->bus->number)) {
				tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
				pci_dev_put(bridge);
				break;
			}
		} while (bridge);
	}

	/* Initialize misc host control in PCI block. */
	tp->misc_host_ctrl |= (misc_ctrl_reg &
			       MISC_HOST_CTRL_CHIPREV);
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Cache the four byte-wide fields packed into TG3PCI_CACHELINESZ. */
	pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
			      &cacheline_sz_reg);

	tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
	tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
	tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
	tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
		tp->pdev_peer = tg3_find_peer(tp);

	/* Classify the chip generation; later checks key off these flags. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
	    (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
		tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;

	if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
		tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
		/* MSI is known-broken on 5750 AX/BX and on single-port
		 * early 5714 (peer == self), so withdraw it there.
		 */
		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
		    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
		     tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
		     tp->pdev_peer == tp->pdev))
			tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
			tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
		} else {
			tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
				ASIC_REV_5750 &&
			    tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
				tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
		}
	}

	/* Every ASIC outside this list supports jumbo frames. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
		tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;

	pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
	if (pcie_cap != 0) {
		tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;

		pcie_set_readrq(tp->pdev, 4096);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			/* 5906 with CLKREQ enabled cannot use HW TSO 2. */
			u16 lnkctl;

			pci_read_config_word(tp->pdev,
					     pcie_cap + PCI_EXP_LNKCTL,
					     &lnkctl);
			if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
				tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
		}
	}

	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles. We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
	if (pci_dev_present(write_reorder_chipsets) &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;

	/* 5703 wants a latency timer of at least 64 PCI clocks. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;

		cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
		cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
		cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
		cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);

		pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
				       cacheline_sz_reg);
	}

	/* Pre-5705 chips and 5780-class parts must expose a PCI-X
	 * capability; its absence is a fatal probe error.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
		if (!tp->pcix_cap) {
			printk(KERN_ERR PFX "Cannot find PCI-X "
					    "capability, aborting.\n");
			return -EIO;
		}
	}

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);

	if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
		tp->tg3_flags |= TG3_FLAG_PCIX_MODE;

		/* If this is a 5700 BX chipset, and we are in PCI-X
		 * mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
			u32 pm_reg;

			tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;

			/* The chip can have it's power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev,
					      tp->pm_cap + PCI_PM_CTRL,
					      &pm_reg);
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev,
					       tp->pm_cap + PCI_PM_CTRL,
					       pm_reg);

			/* Also, force SERR#/PERR# in PCI command. */
			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
		}
	}

	/* 5700 BX chips need to have their TX producer index mailboxes
	 * written twice to workaround a bug.
	 */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
		tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;

	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tp->tg3_flags |= TG3_FLAG_PCI_32BIT;

	/* Chip-specific fixup from Broadcom driver */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
	}

	/* Default fast path register access methods */
	tp->read32 = tg3_read32;
	tp->write32 = tg3_write32;
	tp->read32_mbox = tg3_read32;
	tp->write32_mbox = tg3_write32;
	tp->write32_tx_mbox = tg3_write32;
	tp->write32_rx_mbox = tg3_write32;

	/* Various workaround register access methods */
	if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
		tp->write32 = tg3_write_indirect_reg32;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
		 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
		  tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
		/*
		 * Back to back register writes can cause problems on these
		 * chips, the workaround is to read back all reg writes
		 * except those to mailbox regs.
		 *
		 * See tg3_write_indirect_reg32().
		 */
		tp->write32 = tg3_write_flush_reg32;
	}


	if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
	    (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
			tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
		/* All register access goes through config cycles; MMIO is
		 * unmapped and memory decoding disabled (see ICH comment
		 * above).
		 */
		tp->read32 = tg3_read_indirect_reg32;
		tp->write32 = tg3_write_indirect_reg32;
		tp->read32_mbox = tg3_read_indirect_mbox;
		tp->write32_mbox = tg3_write_indirect_mbox;
		tp->write32_tx_mbox = tg3_write_indirect_mbox;
		tp->write32_rx_mbox = tg3_write_indirect_mbox;

		iounmap(tp->regs);
		tp->regs = NULL;

		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd &= ~PCI_COMMAND_MEMORY;
		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	}
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tp->read32_mbox = tg3_read32_mbox_5906;
		tp->write32_mbox = tg3_write32_mbox_5906;
		tp->write32_tx_mbox = tg3_write32_mbox_5906;
		tp->write32_rx_mbox = tg3_write32_mbox_5906;
	}

	if (tp->write32 == tg3_write_indirect_reg32 ||
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
		tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;

	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLG2_IS_NIC flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
	 */
	tg3_get_eeprom_hw_cfg(tp);

	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
				 PCISTATE_ALLOW_APE_SHMEM_WR;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
				       pci_state_reg);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;

		if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
		    tp->pci_chip_rev_id == CHIPREV_ID_5784_A1 ||
		    tp->pci_chip_rev_id == CHIPREV_ID_5761_A0 ||
		    tp->pci_chip_rev_id == CHIPREV_ID_5761_A1)
			tp->tg3_flags3 |= TG3_FLG3_5761_5784_AX_FIXES;
	}

	/* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
	 * GPIO1 driven high will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
	    (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
		/* Turn off the debug UART. */
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
		if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
			/* Keep VMain power. */
			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					      GRC_LCLCTRL_GPIO_OUTPUT0;
	}

	/* Force the chip into D0. */
	err = tg3_set_power_state(tp, PCI_D0);
	if (err) {
		printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
		       pci_name(tp->pdev));
		return err;
	}

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
		tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;

	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
	if (tp->dev->mtu > ETH_DATA_LEN &&
	    !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;

	/* Determine WakeOnLan speed to use. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
		tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
	} else {
		tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
	}

	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
	    ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
		tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
		tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;

	/* Per-device PHY erratum flags. */
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
				tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
				tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
			tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		tp->phy_otp = tg3_read_otp_phycfg(tp);
		if (tp->phy_otp == 0)
			tp->phy_otp = TG3_OTP_DEFAULT;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
	else
		tp->mi_mode = MAC_MI_MODE_BASE;

	tp->coalesce_mode = 0;
	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

	/* Initialize MAC MI mode, polling disabled. */
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	val &= GRC_MODE_HOST_STACKUP;
	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);

	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
		u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);

		if (chiprevid == CHIPREV_ID_5701_A0 ||
		    chiprevid == CHIPREV_ID_5701_B0 ||
		    chiprevid == CHIPREV_ID_5701_B2 ||
		    chiprevid == CHIPREV_ID_5701_B5) {
			void __iomem *sram_base;

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly. If the return
			 * value is bad, force enable the PCIX workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
		}
	}

	udelay(50);
	tg3_nvram_init(tp);

	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tp->tg3_flags2 |= TG3_FLG2_IS_5788;

	if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
		tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
				      HOSTCC_MODE_CLRTICK_TXBD);

		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
				       tp->misc_host_ctrl);
	}

	/* these are limited to 10/100 only */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
	    (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tp->tg3_flags |= TG3_FLAG_10_100_ONLY;

	err = tg3_phy_probe(tp);
	if (err) {
		printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
		       pci_name(tp->pdev), err);
		/* ... but do not return immediately ... */
	}

	tg3_read_partno(tp);
	tg3_read_fw_ver(tp);

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
			tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
		else
			tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
	}

	/* 5700 {AX,BX} chips have a broken status block link
	 * change bit implementation, so we must use the
	 * status register in those cases.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
		tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
	else
		tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;

	/* The led_ctrl is set during tg3_phy_probe, here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
	    !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
		tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
				  TG3_FLAG_USE_LINKCHG_REG);
	}

	/* For all SERDES we poll the MAC status register. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
	else
		tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;

	/* All chips before 5787 can get confused if TX buffers
	 * straddle the 4GB address boundary in some cases.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tp->dev->hard_start_xmit = tg3_start_xmit;
	else
		tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;

	tp->rx_offset = 2;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
		tp->rx_offset = 0;

	tp->rx_std_max_post = TG3_RX_RING_SIZE;

	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to workaround hw errata.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
		tp->rx_std_max_post = 8;

	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
				     PCIE_PWR_MGMT_L1_THRESH_MSK;

	/* err carries any non-fatal tg3_phy_probe() failure. */
	return err;
}
12028
49b6e95f 12029#ifdef CONFIG_SPARC
1da177e4
LT
12030static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
12031{
12032 struct net_device *dev = tp->dev;
12033 struct pci_dev *pdev = tp->pdev;
49b6e95f 12034 struct device_node *dp = pci_device_to_OF_node(pdev);
374d4cac 12035 const unsigned char *addr;
49b6e95f
DM
12036 int len;
12037
12038 addr = of_get_property(dp, "local-mac-address", &len);
12039 if (addr && len == 6) {
12040 memcpy(dev->dev_addr, addr, 6);
12041 memcpy(dev->perm_addr, dev->dev_addr, 6);
12042 return 0;
1da177e4
LT
12043 }
12044 return -ENODEV;
12045}
12046
12047static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
12048{
12049 struct net_device *dev = tp->dev;
12050
12051 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
2ff43697 12052 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
1da177e4
LT
12053 return 0;
12054}
12055#endif
12056
/* tg3_get_device_address() - determine the device MAC address.
 *
 * Tries sources in decreasing order of trust: OpenFirmware (SPARC only),
 * the NIC SRAM mailbox filled in by bootcode, NVRAM at a chip-specific
 * offset, and finally the live MAC address registers.  On success the
 * address is stored in dev->dev_addr and mirrored to dev->perm_addr.
 *
 * Returns 0 on success, -EINVAL if no valid ethernet address was found.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	/* NVRAM offset of the MAC address; the second MAC of dual-MAC
	 * (5704/5780-class) parts and the 5906 keep it elsewhere.
	 */
	mac_offset = 0x7c;
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	}
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {
		/* 0x484b is the bootcode's validity signature for the
		 * mailbox; the low 16 bits are the first two octets.
		 */
		dev->dev_addr[0] = (hi >> 8) & 0xff;
		dev->dev_addr[1] = (hi >> 0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >> 8) & 0xff;
		dev->dev_addr[5] = (lo >> 0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
			/* NOTE: NVRAM stores the octets in the opposite
			 * byte order from the SRAM mailbox above.
			 */
			dev->dev_addr[0] = ((hi >> 16) & 0xff);
			dev->dev_addr[1] = ((hi >> 24) & 0xff);
			dev->dev_addr[2] = ((lo >>  0) & 0xff);
			dev->dev_addr[3] = ((lo >>  8) & 0xff);
			dev->dev_addr[4] = ((lo >> 16) & 0xff);
			dev->dev_addr[5] = ((lo >> 24) & 0xff);
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
12131
59e6b434
DM
12132#define BOUNDARY_SINGLE_CACHELINE 1
12133#define BOUNDARY_MULTI_CACHELINE 2
12134
12135static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
12136{
12137 int cacheline_size;
12138 u8 byte;
12139 int goal;
12140
12141 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
12142 if (byte == 0)
12143 cacheline_size = 1024;
12144 else
12145 cacheline_size = (int) byte * 4;
12146
12147 /* On 5703 and later chips, the boundary bits have no
12148 * effect.
12149 */
12150 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12151 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12152 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12153 goto out;
12154
12155#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
12156 goal = BOUNDARY_MULTI_CACHELINE;
12157#else
12158#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
12159 goal = BOUNDARY_SINGLE_CACHELINE;
12160#else
12161 goal = 0;
12162#endif
12163#endif
12164
12165 if (!goal)
12166 goto out;
12167
12168 /* PCI controllers on most RISC systems tend to disconnect
12169 * when a device tries to burst across a cache-line boundary.
12170 * Therefore, letting tg3 do so just wastes PCI bandwidth.
12171 *
12172 * Unfortunately, for PCI-E there are only limited
12173 * write-side controls for this, and thus for reads
12174 * we will still get the disconnects. We'll also waste
12175 * these PCI cycles for both read and write for chips
12176 * other than 5700 and 5701 which do not implement the
12177 * boundary bits.
12178 */
12179 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12180 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
12181 switch (cacheline_size) {
12182 case 16:
12183 case 32:
12184 case 64:
12185 case 128:
12186 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12187 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
12188 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
12189 } else {
12190 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12191 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12192 }
12193 break;
12194
12195 case 256:
12196 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
12197 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
12198 break;
12199
12200 default:
12201 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12202 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12203 break;
12204 };
12205 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12206 switch (cacheline_size) {
12207 case 16:
12208 case 32:
12209 case 64:
12210 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12211 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12212 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
12213 break;
12214 }
12215 /* fallthrough */
12216 case 128:
12217 default:
12218 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12219 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
12220 break;
12221 };
12222 } else {
12223 switch (cacheline_size) {
12224 case 16:
12225 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12226 val |= (DMA_RWCTRL_READ_BNDRY_16 |
12227 DMA_RWCTRL_WRITE_BNDRY_16);
12228 break;
12229 }
12230 /* fallthrough */
12231 case 32:
12232 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12233 val |= (DMA_RWCTRL_READ_BNDRY_32 |
12234 DMA_RWCTRL_WRITE_BNDRY_32);
12235 break;
12236 }
12237 /* fallthrough */
12238 case 64:
12239 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12240 val |= (DMA_RWCTRL_READ_BNDRY_64 |
12241 DMA_RWCTRL_WRITE_BNDRY_64);
12242 break;
12243 }
12244 /* fallthrough */
12245 case 128:
12246 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12247 val |= (DMA_RWCTRL_READ_BNDRY_128 |
12248 DMA_RWCTRL_WRITE_BNDRY_128);
12249 break;
12250 }
12251 /* fallthrough */
12252 case 256:
12253 val |= (DMA_RWCTRL_READ_BNDRY_256 |
12254 DMA_RWCTRL_WRITE_BNDRY_256);
12255 break;
12256 case 512:
12257 val |= (DMA_RWCTRL_READ_BNDRY_512 |
12258 DMA_RWCTRL_WRITE_BNDRY_512);
12259 break;
12260 case 1024:
12261 default:
12262 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
12263 DMA_RWCTRL_WRITE_BNDRY_1024);
12264 break;
12265 };
12266 }
12267
12268out:
12269 return val;
12270}
12271
1da177e4
LT
12272static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
12273{
12274 struct tg3_internal_buffer_desc test_desc;
12275 u32 sram_dma_descs;
12276 int i, ret;
12277
12278 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
12279
12280 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
12281 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
12282 tw32(RDMAC_STATUS, 0);
12283 tw32(WDMAC_STATUS, 0);
12284
12285 tw32(BUFMGR_MODE, 0);
12286 tw32(FTQ_RESET, 0);
12287
12288 test_desc.addr_hi = ((u64) buf_dma) >> 32;
12289 test_desc.addr_lo = buf_dma & 0xffffffff;
12290 test_desc.nic_mbuf = 0x00002100;
12291 test_desc.len = size;
12292
12293 /*
12294 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
12295 * the *second* time the tg3 driver was getting loaded after an
12296 * initial scan.
12297 *
12298 * Broadcom tells me:
12299 * ...the DMA engine is connected to the GRC block and a DMA
12300 * reset may affect the GRC block in some unpredictable way...
12301 * The behavior of resets to individual blocks has not been tested.
12302 *
12303 * Broadcom noted the GRC reset will also reset all sub-components.
12304 */
12305 if (to_device) {
12306 test_desc.cqid_sqid = (13 << 8) | 2;
12307
12308 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
12309 udelay(40);
12310 } else {
12311 test_desc.cqid_sqid = (16 << 8) | 7;
12312
12313 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
12314 udelay(40);
12315 }
12316 test_desc.flags = 0x00000005;
12317
12318 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
12319 u32 val;
12320
12321 val = *(((u32 *)&test_desc) + i);
12322 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
12323 sram_dma_descs + (i * sizeof(u32)));
12324 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
12325 }
12326 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
12327
12328 if (to_device) {
12329 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
12330 } else {
12331 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
12332 }
12333
12334 ret = -ENODEV;
12335 for (i = 0; i < 40; i++) {
12336 u32 val;
12337
12338 if (to_device)
12339 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
12340 else
12341 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
12342 if ((val & 0xffff) == sram_dma_descs) {
12343 ret = 0;
12344 break;
12345 }
12346
12347 udelay(100);
12348 }
12349
12350 return ret;
12351}
12352
ded7340d 12353#define TEST_BUFFER_SIZE 0x2000
1da177e4
LT
12354
12355static int __devinit tg3_test_dma(struct tg3 *tp)
12356{
12357 dma_addr_t buf_dma;
59e6b434 12358 u32 *buf, saved_dma_rwctrl;
1da177e4
LT
12359 int ret;
12360
12361 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
12362 if (!buf) {
12363 ret = -ENOMEM;
12364 goto out_nofree;
12365 }
12366
12367 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
12368 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
12369
59e6b434 12370 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
1da177e4
LT
12371
12372 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12373 /* DMA read watermark not used on PCIE */
12374 tp->dma_rwctrl |= 0x00180000;
12375 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
85e94ced
MC
12376 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
12377 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
1da177e4
LT
12378 tp->dma_rwctrl |= 0x003f0000;
12379 else
12380 tp->dma_rwctrl |= 0x003f000f;
12381 } else {
12382 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12383 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
12384 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
49afdeb6 12385 u32 read_water = 0x7;
1da177e4 12386
4a29cc2e
MC
12387 /* If the 5704 is behind the EPB bridge, we can
12388 * do the less restrictive ONE_DMA workaround for
12389 * better performance.
12390 */
12391 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
12392 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12393 tp->dma_rwctrl |= 0x8000;
12394 else if (ccval == 0x6 || ccval == 0x7)
1da177e4
LT
12395 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
12396
49afdeb6
MC
12397 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
12398 read_water = 4;
59e6b434 12399 /* Set bit 23 to enable PCIX hw bug fix */
49afdeb6
MC
12400 tp->dma_rwctrl |=
12401 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
12402 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
12403 (1 << 23);
4cf78e4f
MC
12404 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
12405 /* 5780 always in PCIX mode */
12406 tp->dma_rwctrl |= 0x00144000;
a4e2b347
MC
12407 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12408 /* 5714 always in PCIX mode */
12409 tp->dma_rwctrl |= 0x00148000;
1da177e4
LT
12410 } else {
12411 tp->dma_rwctrl |= 0x001b000f;
12412 }
12413 }
12414
12415 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12416 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12417 tp->dma_rwctrl &= 0xfffffff0;
12418
12419 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12420 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
12421 /* Remove this if it causes problems for some boards. */
12422 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
12423
12424 /* On 5700/5701 chips, we need to set this bit.
12425 * Otherwise the chip will issue cacheline transactions
12426 * to streamable DMA memory with not all the byte
12427 * enables turned on. This is an error on several
12428 * RISC PCI controllers, in particular sparc64.
12429 *
12430 * On 5703/5704 chips, this bit has been reassigned
12431 * a different meaning. In particular, it is used
12432 * on those chips to enable a PCI-X workaround.
12433 */
12434 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
12435 }
12436
12437 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12438
12439#if 0
12440 /* Unneeded, already done by tg3_get_invariants. */
12441 tg3_switch_clocks(tp);
12442#endif
12443
12444 ret = 0;
12445 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12446 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
12447 goto out;
12448
59e6b434
DM
12449 /* It is best to perform DMA test with maximum write burst size
12450 * to expose the 5700/5701 write DMA bug.
12451 */
12452 saved_dma_rwctrl = tp->dma_rwctrl;
12453 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12454 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12455
1da177e4
LT
12456 while (1) {
12457 u32 *p = buf, i;
12458
12459 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
12460 p[i] = i;
12461
12462 /* Send the buffer to the chip. */
12463 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
12464 if (ret) {
12465 printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
12466 break;
12467 }
12468
12469#if 0
12470 /* validate data reached card RAM correctly. */
12471 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
12472 u32 val;
12473 tg3_read_mem(tp, 0x2100 + (i*4), &val);
12474 if (le32_to_cpu(val) != p[i]) {
12475 printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
12476 /* ret = -ENODEV here? */
12477 }
12478 p[i] = 0;
12479 }
12480#endif
12481 /* Now read it back. */
12482 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
12483 if (ret) {
12484 printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
12485
12486 break;
12487 }
12488
12489 /* Verify it. */
12490 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
12491 if (p[i] == i)
12492 continue;
12493
59e6b434
DM
12494 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
12495 DMA_RWCTRL_WRITE_BNDRY_16) {
12496 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
1da177e4
LT
12497 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
12498 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12499 break;
12500 } else {
12501 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
12502 ret = -ENODEV;
12503 goto out;
12504 }
12505 }
12506
12507 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
12508 /* Success. */
12509 ret = 0;
12510 break;
12511 }
12512 }
59e6b434
DM
12513 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
12514 DMA_RWCTRL_WRITE_BNDRY_16) {
6d1cfbab
MC
12515 static struct pci_device_id dma_wait_state_chipsets[] = {
12516 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
12517 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
12518 { },
12519 };
12520
59e6b434 12521 /* DMA test passed without adjusting DMA boundary,
6d1cfbab
MC
12522 * now look for chipsets that are known to expose the
12523 * DMA bug without failing the test.
59e6b434 12524 */
6d1cfbab
MC
12525 if (pci_dev_present(dma_wait_state_chipsets)) {
12526 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12527 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
12528 }
12529 else
12530 /* Safe to use the calculated DMA boundary. */
12531 tp->dma_rwctrl = saved_dma_rwctrl;
12532
59e6b434
DM
12533 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12534 }
1da177e4
LT
12535
12536out:
12537 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
12538out_nofree:
12539 return ret;
12540}
12541
12542static void __devinit tg3_init_link_config(struct tg3 *tp)
12543{
12544 tp->link_config.advertising =
12545 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12546 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12547 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
12548 ADVERTISED_Autoneg | ADVERTISED_MII);
12549 tp->link_config.speed = SPEED_INVALID;
12550 tp->link_config.duplex = DUPLEX_INVALID;
12551 tp->link_config.autoneg = AUTONEG_ENABLE;
1da177e4
LT
12552 tp->link_config.active_speed = SPEED_INVALID;
12553 tp->link_config.active_duplex = DUPLEX_INVALID;
12554 tp->link_config.phy_is_low_power = 0;
12555 tp->link_config.orig_speed = SPEED_INVALID;
12556 tp->link_config.orig_duplex = DUPLEX_INVALID;
12557 tp->link_config.orig_autoneg = AUTONEG_INVALID;
12558}
12559
12560static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
12561{
fdfec172
MC
12562 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12563 tp->bufmgr_config.mbuf_read_dma_low_water =
12564 DEFAULT_MB_RDMA_LOW_WATER_5705;
12565 tp->bufmgr_config.mbuf_mac_rx_low_water =
12566 DEFAULT_MB_MACRX_LOW_WATER_5705;
12567 tp->bufmgr_config.mbuf_high_water =
12568 DEFAULT_MB_HIGH_WATER_5705;
b5d3772c
MC
12569 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12570 tp->bufmgr_config.mbuf_mac_rx_low_water =
12571 DEFAULT_MB_MACRX_LOW_WATER_5906;
12572 tp->bufmgr_config.mbuf_high_water =
12573 DEFAULT_MB_HIGH_WATER_5906;
12574 }
fdfec172
MC
12575
12576 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12577 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
12578 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12579 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
12580 tp->bufmgr_config.mbuf_high_water_jumbo =
12581 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
12582 } else {
12583 tp->bufmgr_config.mbuf_read_dma_low_water =
12584 DEFAULT_MB_RDMA_LOW_WATER;
12585 tp->bufmgr_config.mbuf_mac_rx_low_water =
12586 DEFAULT_MB_MACRX_LOW_WATER;
12587 tp->bufmgr_config.mbuf_high_water =
12588 DEFAULT_MB_HIGH_WATER;
12589
12590 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12591 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
12592 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12593 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
12594 tp->bufmgr_config.mbuf_high_water_jumbo =
12595 DEFAULT_MB_HIGH_WATER_JUMBO;
12596 }
1da177e4
LT
12597
12598 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
12599 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
12600}
12601
12602static char * __devinit tg3_phy_string(struct tg3 *tp)
12603{
12604 switch (tp->phy_id & PHY_ID_MASK) {
12605 case PHY_ID_BCM5400: return "5400";
12606 case PHY_ID_BCM5401: return "5401";
12607 case PHY_ID_BCM5411: return "5411";
12608 case PHY_ID_BCM5701: return "5701";
12609 case PHY_ID_BCM5703: return "5703";
12610 case PHY_ID_BCM5704: return "5704";
12611 case PHY_ID_BCM5705: return "5705";
12612 case PHY_ID_BCM5750: return "5750";
85e94ced 12613 case PHY_ID_BCM5752: return "5752";
a4e2b347 12614 case PHY_ID_BCM5714: return "5714";
4cf78e4f 12615 case PHY_ID_BCM5780: return "5780";
af36e6b6 12616 case PHY_ID_BCM5755: return "5755";
d9ab5ad1 12617 case PHY_ID_BCM5787: return "5787";
d30cdd28 12618 case PHY_ID_BCM5784: return "5784";
126a3368 12619 case PHY_ID_BCM5756: return "5722/5756";
b5d3772c 12620 case PHY_ID_BCM5906: return "5906";
9936bcf6 12621 case PHY_ID_BCM5761: return "5761";
1da177e4
LT
12622 case PHY_ID_BCM8002: return "8002/serdes";
12623 case 0: return "serdes";
12624 default: return "unknown";
12625 };
12626}
12627
f9804ddb
MC
12628static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
12629{
12630 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12631 strcpy(str, "PCI Express");
12632 return str;
12633 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12634 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
12635
12636 strcpy(str, "PCIX:");
12637
12638 if ((clock_ctrl == 7) ||
12639 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
12640 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
12641 strcat(str, "133MHz");
12642 else if (clock_ctrl == 0)
12643 strcat(str, "33MHz");
12644 else if (clock_ctrl == 2)
12645 strcat(str, "50MHz");
12646 else if (clock_ctrl == 4)
12647 strcat(str, "66MHz");
12648 else if (clock_ctrl == 6)
12649 strcat(str, "100MHz");
f9804ddb
MC
12650 } else {
12651 strcpy(str, "PCI:");
12652 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
12653 strcat(str, "66MHz");
12654 else
12655 strcat(str, "33MHz");
12656 }
12657 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
12658 strcat(str, ":32-bit");
12659 else
12660 strcat(str, ":64-bit");
12661 return str;
12662}
12663
8c2dc7e1 12664static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
1da177e4
LT
12665{
12666 struct pci_dev *peer;
12667 unsigned int func, devnr = tp->pdev->devfn & ~7;
12668
12669 for (func = 0; func < 8; func++) {
12670 peer = pci_get_slot(tp->pdev->bus, devnr | func);
12671 if (peer && peer != tp->pdev)
12672 break;
12673 pci_dev_put(peer);
12674 }
16fe9d74
MC
12675 /* 5704 can be configured in single-port mode, set peer to
12676 * tp->pdev in that case.
12677 */
12678 if (!peer) {
12679 peer = tp->pdev;
12680 return peer;
12681 }
1da177e4
LT
12682
12683 /*
12684 * We don't need to keep the refcount elevated; there's no way
12685 * to remove one half of this device without removing the other
12686 */
12687 pci_dev_put(peer);
12688
12689 return peer;
12690}
12691
15f9850d
DM
12692static void __devinit tg3_init_coal(struct tg3 *tp)
12693{
12694 struct ethtool_coalesce *ec = &tp->coal;
12695
12696 memset(ec, 0, sizeof(*ec));
12697 ec->cmd = ETHTOOL_GCOALESCE;
12698 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
12699 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
12700 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
12701 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
12702 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
12703 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
12704 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
12705 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
12706 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
12707
12708 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
12709 HOSTCC_MODE_CLRTICK_TXBD)) {
12710 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
12711 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
12712 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
12713 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
12714 }
d244c892
MC
12715
12716 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12717 ec->rx_coalesce_usecs_irq = 0;
12718 ec->tx_coalesce_usecs_irq = 0;
12719 ec->stats_block_coalesce_usecs = 0;
12720 }
15f9850d
DM
12721}
12722
1da177e4
LT
12723static int __devinit tg3_init_one(struct pci_dev *pdev,
12724 const struct pci_device_id *ent)
12725{
12726 static int tg3_version_printed = 0;
2de58e30
SS
12727 resource_size_t tg3reg_base;
12728 unsigned long tg3reg_len;
1da177e4
LT
12729 struct net_device *dev;
12730 struct tg3 *tp;
d6645372 12731 int err, pm_cap;
f9804ddb 12732 char str[40];
72f2afb8 12733 u64 dma_mask, persist_dma_mask;
d6645372 12734 DECLARE_MAC_BUF(mac);
1da177e4
LT
12735
12736 if (tg3_version_printed++ == 0)
12737 printk(KERN_INFO "%s", version);
12738
12739 err = pci_enable_device(pdev);
12740 if (err) {
12741 printk(KERN_ERR PFX "Cannot enable PCI device, "
12742 "aborting.\n");
12743 return err;
12744 }
12745
12746 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12747 printk(KERN_ERR PFX "Cannot find proper PCI device "
12748 "base address, aborting.\n");
12749 err = -ENODEV;
12750 goto err_out_disable_pdev;
12751 }
12752
12753 err = pci_request_regions(pdev, DRV_MODULE_NAME);
12754 if (err) {
12755 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
12756 "aborting.\n");
12757 goto err_out_disable_pdev;
12758 }
12759
12760 pci_set_master(pdev);
12761
12762 /* Find power-management capability. */
12763 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
12764 if (pm_cap == 0) {
12765 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
12766 "aborting.\n");
12767 err = -EIO;
12768 goto err_out_free_res;
12769 }
12770
1da177e4
LT
12771 tg3reg_base = pci_resource_start(pdev, 0);
12772 tg3reg_len = pci_resource_len(pdev, 0);
12773
12774 dev = alloc_etherdev(sizeof(*tp));
12775 if (!dev) {
12776 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
12777 err = -ENOMEM;
12778 goto err_out_free_res;
12779 }
12780
1da177e4
LT
12781 SET_NETDEV_DEV(dev, &pdev->dev);
12782
1da177e4
LT
12783#if TG3_VLAN_TAG_USED
12784 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
12785 dev->vlan_rx_register = tg3_vlan_rx_register;
1da177e4
LT
12786#endif
12787
12788 tp = netdev_priv(dev);
12789 tp->pdev = pdev;
12790 tp->dev = dev;
12791 tp->pm_cap = pm_cap;
12792 tp->mac_mode = TG3_DEF_MAC_MODE;
12793 tp->rx_mode = TG3_DEF_RX_MODE;
12794 tp->tx_mode = TG3_DEF_TX_MODE;
8ef21428 12795
1da177e4
LT
12796 if (tg3_debug > 0)
12797 tp->msg_enable = tg3_debug;
12798 else
12799 tp->msg_enable = TG3_DEF_MSG_ENABLE;
12800
12801 /* The word/byte swap controls here control register access byte
12802 * swapping. DMA data byte swapping is controlled in the GRC_MODE
12803 * setting below.
12804 */
12805 tp->misc_host_ctrl =
12806 MISC_HOST_CTRL_MASK_PCI_INT |
12807 MISC_HOST_CTRL_WORD_SWAP |
12808 MISC_HOST_CTRL_INDIR_ACCESS |
12809 MISC_HOST_CTRL_PCISTATE_RW;
12810
12811 /* The NONFRM (non-frame) byte/word swap controls take effect
12812 * on descriptor entries, anything which isn't packet data.
12813 *
12814 * The StrongARM chips on the board (one for tx, one for rx)
12815 * are running in big-endian mode.
12816 */
12817 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
12818 GRC_MODE_WSWAP_NONFRM_DATA);
12819#ifdef __BIG_ENDIAN
12820 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
12821#endif
12822 spin_lock_init(&tp->lock);
1da177e4 12823 spin_lock_init(&tp->indirect_lock);
c4028958 12824 INIT_WORK(&tp->reset_task, tg3_reset_task);
1da177e4
LT
12825
12826 tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
ab0049b4 12827 if (!tp->regs) {
1da177e4
LT
12828 printk(KERN_ERR PFX "Cannot map device registers, "
12829 "aborting.\n");
12830 err = -ENOMEM;
12831 goto err_out_free_dev;
12832 }
12833
12834 tg3_init_link_config(tp);
12835
1da177e4
LT
12836 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
12837 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
12838 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
12839
12840 dev->open = tg3_open;
12841 dev->stop = tg3_close;
12842 dev->get_stats = tg3_get_stats;
12843 dev->set_multicast_list = tg3_set_rx_mode;
12844 dev->set_mac_address = tg3_set_mac_addr;
12845 dev->do_ioctl = tg3_ioctl;
12846 dev->tx_timeout = tg3_tx_timeout;
bea3348e 12847 netif_napi_add(dev, &tp->napi, tg3_poll, 64);
1da177e4 12848 dev->ethtool_ops = &tg3_ethtool_ops;
1da177e4
LT
12849 dev->watchdog_timeo = TG3_TX_TIMEOUT;
12850 dev->change_mtu = tg3_change_mtu;
12851 dev->irq = pdev->irq;
12852#ifdef CONFIG_NET_POLL_CONTROLLER
12853 dev->poll_controller = tg3_poll_controller;
12854#endif
12855
12856 err = tg3_get_invariants(tp);
12857 if (err) {
12858 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
12859 "aborting.\n");
12860 goto err_out_iounmap;
12861 }
12862
4a29cc2e
MC
12863 /* The EPB bridge inside 5714, 5715, and 5780 and any
12864 * device behind the EPB cannot support DMA addresses > 40-bit.
72f2afb8
MC
12865 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
12866 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
12867 * do DMA address check in tg3_start_xmit().
12868 */
4a29cc2e
MC
12869 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
12870 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
12871 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
72f2afb8
MC
12872 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
12873#ifdef CONFIG_HIGHMEM
12874 dma_mask = DMA_64BIT_MASK;
12875#endif
4a29cc2e 12876 } else
72f2afb8
MC
12877 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
12878
12879 /* Configure DMA attributes. */
12880 if (dma_mask > DMA_32BIT_MASK) {
12881 err = pci_set_dma_mask(pdev, dma_mask);
12882 if (!err) {
12883 dev->features |= NETIF_F_HIGHDMA;
12884 err = pci_set_consistent_dma_mask(pdev,
12885 persist_dma_mask);
12886 if (err < 0) {
12887 printk(KERN_ERR PFX "Unable to obtain 64 bit "
12888 "DMA for consistent allocations\n");
12889 goto err_out_iounmap;
12890 }
12891 }
12892 }
12893 if (err || dma_mask == DMA_32BIT_MASK) {
12894 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
12895 if (err) {
12896 printk(KERN_ERR PFX "No usable DMA configuration, "
12897 "aborting.\n");
12898 goto err_out_iounmap;
12899 }
12900 }
12901
fdfec172 12902 tg3_init_bufmgr_config(tp);
1da177e4 12903
1da177e4
LT
12904 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
12905 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
12906 }
12907 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12908 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12909 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
c7835a77 12910 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
1da177e4
LT
12911 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
12912 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
12913 } else {
7f62ad5d 12914 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
1da177e4
LT
12915 }
12916
4e3a7aaa
MC
12917 /* TSO is on by default on chips that support hardware TSO.
12918 * Firmware TSO on older chips gives lower performance, so it
12919 * is off by default, but can be enabled using ethtool.
12920 */
b0026624 12921 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
1da177e4 12922 dev->features |= NETIF_F_TSO;
b5d3772c
MC
12923 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
12924 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
b0026624 12925 dev->features |= NETIF_F_TSO6;
9936bcf6
MC
12926 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12927 dev->features |= NETIF_F_TSO_ECN;
b0026624 12928 }
1da177e4 12929
1da177e4
LT
12930
12931 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
12932 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
12933 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
12934 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
12935 tp->rx_pending = 63;
12936 }
12937
1da177e4
LT
12938 err = tg3_get_device_address(tp);
12939 if (err) {
12940 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
12941 "aborting.\n");
12942 goto err_out_iounmap;
12943 }
12944
c88864df
MC
12945 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12946 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
12947 printk(KERN_ERR PFX "Cannot find proper PCI device "
12948 "base address for APE, aborting.\n");
12949 err = -ENODEV;
12950 goto err_out_iounmap;
12951 }
12952
12953 tg3reg_base = pci_resource_start(pdev, 2);
12954 tg3reg_len = pci_resource_len(pdev, 2);
12955
12956 tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
79ea13ce 12957 if (!tp->aperegs) {
c88864df
MC
12958 printk(KERN_ERR PFX "Cannot map APE registers, "
12959 "aborting.\n");
12960 err = -ENOMEM;
12961 goto err_out_iounmap;
12962 }
12963
12964 tg3_ape_lock_init(tp);
12965 }
12966
1da177e4
LT
12967 /*
12968 * Reset chip in case UNDI or EFI driver did not shutdown
12969 * DMA self test will enable WDMAC and we'll see (spurious)
12970 * pending DMA on the PCI bus at that point.
12971 */
12972 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
12973 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
1da177e4 12974 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
944d980e 12975 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1da177e4
LT
12976 }
12977
12978 err = tg3_test_dma(tp);
12979 if (err) {
12980 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
c88864df 12981 goto err_out_apeunmap;
1da177e4
LT
12982 }
12983
12984 /* Tigon3 can do ipv4 only... and some chips have buggy
12985 * checksumming.
12986 */
12987 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
d212f87b 12988 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
af36e6b6 12989 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
d30cdd28 12990 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9936bcf6
MC
12991 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12992 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
d212f87b
SH
12993 dev->features |= NETIF_F_IPV6_CSUM;
12994
1da177e4
LT
12995 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
12996 } else
12997 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
12998
1da177e4
LT
12999 /* flow control autonegotiation is default behavior */
13000 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8d018621 13001 tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1da177e4 13002
15f9850d
DM
13003 tg3_init_coal(tp);
13004
c49a1561
MC
13005 pci_set_drvdata(pdev, dev);
13006
1da177e4
LT
13007 err = register_netdev(dev);
13008 if (err) {
13009 printk(KERN_ERR PFX "Cannot register net device, "
13010 "aborting.\n");
0d3031d9 13011 goto err_out_apeunmap;
1da177e4
LT
13012 }
13013
d6645372
JP
13014 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] "
13015 "(%s) %s Ethernet %s\n",
1da177e4
LT
13016 dev->name,
13017 tp->board_part_number,
13018 tp->pci_chip_rev_id,
13019 tg3_phy_string(tp),
f9804ddb 13020 tg3_bus_string(tp, str),
cbb45d21
MC
13021 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
13022 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
d6645372
JP
13023 "10/100/1000Base-T")),
13024 print_mac(mac, dev->dev_addr));
1da177e4
LT
13025
13026 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
1c46ae05 13027 "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
1da177e4
LT
13028 dev->name,
13029 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
13030 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
13031 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
13032 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
1da177e4
LT
13033 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
13034 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
4a29cc2e
MC
13035 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
13036 dev->name, tp->dma_rwctrl,
13037 (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
13038 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
1da177e4
LT
13039
13040 return 0;
13041
0d3031d9
MC
13042err_out_apeunmap:
13043 if (tp->aperegs) {
13044 iounmap(tp->aperegs);
13045 tp->aperegs = NULL;
13046 }
13047
1da177e4 13048err_out_iounmap:
6892914f
MC
13049 if (tp->regs) {
13050 iounmap(tp->regs);
22abe310 13051 tp->regs = NULL;
6892914f 13052 }
1da177e4
LT
13053
13054err_out_free_dev:
13055 free_netdev(dev);
13056
13057err_out_free_res:
13058 pci_release_regions(pdev);
13059
13060err_out_disable_pdev:
13061 pci_disable_device(pdev);
13062 pci_set_drvdata(pdev, NULL);
13063 return err;
13064}
13065
13066static void __devexit tg3_remove_one(struct pci_dev *pdev)
13067{
13068 struct net_device *dev = pci_get_drvdata(pdev);
13069
13070 if (dev) {
13071 struct tg3 *tp = netdev_priv(dev);
13072
7faa006f 13073 flush_scheduled_work();
1da177e4 13074 unregister_netdev(dev);
0d3031d9
MC
13075 if (tp->aperegs) {
13076 iounmap(tp->aperegs);
13077 tp->aperegs = NULL;
13078 }
6892914f
MC
13079 if (tp->regs) {
13080 iounmap(tp->regs);
22abe310 13081 tp->regs = NULL;
6892914f 13082 }
1da177e4
LT
13083 free_netdev(dev);
13084 pci_release_regions(pdev);
13085 pci_disable_device(pdev);
13086 pci_set_drvdata(pdev, NULL);
13087 }
13088}
13089
/* Legacy PCI power-management suspend hook.
 *
 * Saves PCI config space, quiesces the NIC (stop queues, kill the
 * watchdog timer, disable interrupts, halt the chip) and then drops it
 * into the target power state.  If the power transition fails, the
 * device is restarted so the system is left in a usable state.
 *
 * Returns 0 on success or a negative errno from tg3_set_power_state().
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);

	if (!netif_running(dev))
		return 0;

	/* Let any pending reset/link-change work finish first. */
	flush_scheduled_work();
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	/* irq_sync == 1: also synchronize against the interrupt handler
	 * before turning interrupts off.
	 */
	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
	if (err) {
		/* Suspend failed: best-effort revival of the device so the
		 * interface keeps working.  If even the restart fails, skip
		 * the timer/netif re-arm and report the original error.
		 */
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		if (tg3_restart_hw(tp, 1))
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);
	}

	return err;
}
13141
/* Legacy PCI power-management resume hook.
 *
 * Restores PCI config space saved by tg3_suspend(), brings the chip
 * back to full power (D0), reinitializes the hardware, and re-arms the
 * watchdog timer and transmit path.
 *
 * Returns 0 on success or a negative errno from the power transition
 * or hardware restart.
 */
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Undo the pci_save_state() done in tg3_suspend(). */
	pci_restore_state(tp->pdev);

	if (!netif_running(dev))
		return 0;

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	/* reset_phy == 1: full reinit including the PHY. */
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	return err;
}
13176
/* PCI driver glue: device-ID table, probe/remove entry points, and the
 * legacy (pre-dev_pm_ops) suspend/resume callbacks.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};
13185
13186static int __init tg3_init(void)
13187{
29917620 13188 return pci_register_driver(&tg3_driver);
1da177e4
LT
13189}
13190
/* Module unload entry point: detach the driver from the PCI core;
 * tg3_remove_one() is called for each bound device as a side effect.
 */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);