/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag) \
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag) \
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag) \
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
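
/* Example: tg3_flag(tp, TAGGED_STATUS) expands to
 * _tg3_flag(TG3_FLAG_TAGGED_STATUS, (tp)->tg3_flags), i.e. an atomic
 * test_bit() on the flag bitmap.  Pasting the short flag name onto the
 * TG3_FLAG_ prefix keeps call sites compact while the enum keeps flag
 * names type-checked by the helpers above.
 */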

#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		119
#define DRV_MODULE_VERSION \
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"May 18, 2011"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE \
	(NETIF_MSG_DRV | \
	 NETIF_MSG_PROBE | \
	 NETIF_MSG_LINK | \
	 NETIF_MSG_TIMER | \
	 NETIF_MSG_IFDOWN | \
	 NETIF_MSG_IFUP | \
	 NETIF_MSG_RX_ERR | \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp) \
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
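
/* Worked example: TG3_TX_RING_SIZE is 512, a power of two, so the mask
 * above is 0x1ff and NEXT_TX(511) wraps to 0.  The AND is the cheap
 * equivalent of ((N) + 1) % TG3_TX_RING_SIZE, which is exactly the
 * shift-and-mask trick the comment above the ring-size constants
 * asks for.
 */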

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif
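
/* On the fast path this means TG3_RX_COPY_THRESH(tp) is either a
 * compile-time constant (256) that the compiler can fold into the
 * comparison, or a per-device field load, depending on whether the
 * architecture handles unaligned accesses cheaply.
 */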

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
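
/* Indirect access works through a window in PCI config space: the
 * register offset is first written to TG3PCI_REG_BASE_ADDR, then the
 * data is written to (or read from) TG3PCI_REG_DATA.  Because the
 * window is a single shared resource, each base/data pair must be
 * issued atomically under indirect_lock, as above and in
 * tg3_read_indirect_reg32() below.
 */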

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
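
/* The read-back in the posted branch matters: MMIO writes can be
 * posted (buffered) by the PCI bridge, so writel() alone does not
 * guarantee the value has reached the device.  Reading the same
 * register forces the write to complete before the second udelay()
 * starts counting.
 */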

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver doesn't have any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}
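
/* The hardware arbitrates the lock: the driver writes its request bit
 * to the per-lock request register, then polls the grant register
 * (100 iterations x udelay(10) = the 1 ms budget above).  Callers pair
 * tg3_ape_lock(tp, TG3_APE_LOCK_MEM) with tg3_ape_unlock() the same
 * way a spinlock is paired with its release.
 */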

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
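
/* The MI_COM frame built above packs the PHY address and register
 * number into their shifted fields, then sets MI_COM_CMD_READ and
 * MI_COM_START to kick off the MII transaction.  Completion is
 * detected by polling MI_COM_BUSY for up to PHY_BUSY_LOOPS iterations
 * of udelay(10); the data comes back in the low bits of the same
 * register (MI_COM_DATA_MASK).  tg3_writephy() below uses the same
 * frame format with the data field filled in and MI_COM_CMD_WRITE.
 */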

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
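
/* Worked example of the delay math: with the full 2500 usec budget,
 * delay_cnt becomes (2500 >> 3) + 1 = 313 polls of udelay(8), i.e.
 * roughly 2504 usec of busy-waiting, checking after each step whether
 * the firmware has already acknowledged the previous event.
 */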

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}
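
/* The link update hands the firmware four 32-bit words at
 * NIC_SRAM_FW_CMD_DATA_MBOX, each packing two MII registers as
 * (first << 16) | second: BMCR/BMSR, ADVERTISE/LPA, CTRL1000/STAT1000
 * and PHYADDR.  The length mailbox is written first, then the data,
 * then tg3_generate_fw_event() raises the event for the firmware.
 */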

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}

static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
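
/* Resolution summary for the function above (PAUSE = symmetric bit,
 * ASYM = asymmetric bit):
 *
 *	local PAUSE+ASYM, remote PAUSE		-> TX|RX
 *	local PAUSE+ASYM, remote ASYM only	-> RX
 *	local PAUSE only, remote PAUSE		-> TX|RX
 *	local ASYM only,  remote PAUSE+ASYM	-> TX
 *	anything else				-> none
 */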

static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}

static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}

static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}

static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
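
/* Both writes above use the PHY's shadow-register convention: a single
 * MII register (MII_TG3_MISC_SHDW) multiplexes several internal
 * registers, with the _SEL bits picking the target and _WREN enabling
 * the write.  The FET variant in tg3_phy_fet_toggle_apd() does the
 * same through MII_TG3_FET_TEST's shadow-enable bit.
 */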
1709
1710 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1711 {
1712 u32 phy;
1713
1714 if (!tg3_flag(tp, 5705_PLUS) ||
1715 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
1716 return;
1717
1718 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1719 u32 ephy;
1720
1721 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
1722 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
1723
1724 tg3_writephy(tp, MII_TG3_FET_TEST,
1725 ephy | MII_TG3_FET_SHADOW_EN);
1726 if (!tg3_readphy(tp, reg, &phy)) {
1727 if (enable)
1728 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1729 else
1730 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1731 tg3_writephy(tp, reg, phy);
1732 }
1733 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
1734 }
1735 } else {
1736 int ret;
1737
1738 ret = tg3_phy_auxctl_read(tp,
1739 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
1740 if (!ret) {
1741 if (enable)
1742 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1743 else
1744 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1745 tg3_phy_auxctl_write(tp,
1746 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
1747 }
1748 }
1749 }
1750
1751 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1752 {
1753 int ret;
1754 u32 val;
1755
1756 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
1757 return;
1758
1759 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
1760 if (!ret)
1761 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
1762 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
1763 }
1764
1765 static void tg3_phy_apply_otp(struct tg3 *tp)
1766 {
1767 u32 otp, phy;
1768
1769 if (!tp->phy_otp)
1770 return;
1771
1772 otp = tp->phy_otp;
1773
1774 if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
1775 return;
1776
1777 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1778 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1779 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1780
1781 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1782 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1783 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1784
1785 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1786 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1787 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1788
1789 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1790 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1791
1792 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1793 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1794
1795 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1796 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1797 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1798
1799 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1800 }
1801
1802 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
1803 {
1804 u32 val;
1805
1806 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
1807 return;
1808
1809 tp->setlpicnt = 0;
1810
1811 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1812 current_link_up == 1 &&
1813 tp->link_config.active_duplex == DUPLEX_FULL &&
1814 (tp->link_config.active_speed == SPEED_100 ||
1815 tp->link_config.active_speed == SPEED_1000)) {
1816 u32 eeectl;
1817
1818 if (tp->link_config.active_speed == SPEED_1000)
1819 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
1820 else
1821 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
1822
1823 tw32(TG3_CPMU_EEE_CTRL, eeectl);
1824
1825 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
1826 TG3_CL45_D7_EEERES_STAT, &val);
1827
1828 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
1829 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
1830 tp->setlpicnt = 2;
1831 }
1832
1833 if (!tp->setlpicnt) {
1834 val = tr32(TG3_CPMU_EEE_MODE);
1835 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
1836 }
1837 }
1838
1839 static void tg3_phy_eee_enable(struct tg3 *tp)
1840 {
1841 u32 val;
1842
1843 if (tp->link_config.active_speed == SPEED_1000 &&
1844 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1845 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
1846 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
1847 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1848 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0003);
1849 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1850 }
1851
1852 val = tr32(TG3_CPMU_EEE_MODE);
1853 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
1854 }
1855
1856 static int tg3_wait_macro_done(struct tg3 *tp)
1857 {
1858 int limit = 100;
1859
1860 while (limit--) {
1861 u32 tmp32;
1862
1863 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1864 if ((tmp32 & 0x1000) == 0)
1865 break;
1866 }
1867 }
1868 if (limit < 0)
1869 return -EBUSY;
1870
1871 return 0;
1872 }
1873
1874 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1875 {
1876 static const u32 test_pat[4][6] = {
1877 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1878 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1879 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1880 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1881 };
1882 int chan;
1883
1884 for (chan = 0; chan < 4; chan++) {
1885 int i;
1886
1887 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1888 (chan * 0x2000) | 0x0200);
1889 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1890
1891 for (i = 0; i < 6; i++)
1892 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1893 test_pat[chan][i]);
1894
1895 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1896 if (tg3_wait_macro_done(tp)) {
1897 *resetp = 1;
1898 return -EBUSY;
1899 }
1900
1901 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1902 (chan * 0x2000) | 0x0200);
1903 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
1904 if (tg3_wait_macro_done(tp)) {
1905 *resetp = 1;
1906 return -EBUSY;
1907 }
1908
1909 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
1910 if (tg3_wait_macro_done(tp)) {
1911 *resetp = 1;
1912 return -EBUSY;
1913 }
1914
1915 for (i = 0; i < 6; i += 2) {
1916 u32 low, high;
1917
1918 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1919 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1920 tg3_wait_macro_done(tp)) {
1921 *resetp = 1;
1922 return -EBUSY;
1923 }
1924 low &= 0x7fff;
1925 high &= 0x000f;
1926 if (low != test_pat[chan][i] ||
1927 high != test_pat[chan][i+1]) {
1928 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1929 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1930 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1931
1932 return -EBUSY;
1933 }
1934 }
1935 }
1936
1937 return 0;
1938 }
1939
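/* Zero the six pattern words in all four DSP channels, undoing
 * whatever tg3_phy_write_and_check_testpat() left behind.
 */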
1940 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1941 {
1942 int chan;
1943
1944 for (chan = 0; chan < 4; chan++) {
1945 int i;
1946
1947 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1948 (chan * 0x2000) | 0x0200);
1949 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1950 for (i = 0; i < 6; i++)
1951 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1952 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1953 if (tg3_wait_macro_done(tp))
1954 return -EBUSY;
1955 }
1956
1957 return 0;
1958 }
1959
1960 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1961 {
1962 u32 reg32, phy9_orig;
1963 int retries, do_phy_reset, err;
1964
1965 retries = 10;
1966 do_phy_reset = 1;
1967 do {
1968 if (do_phy_reset) {
1969 err = tg3_bmcr_reset(tp);
1970 if (err)
1971 return err;
1972 do_phy_reset = 0;
1973 }
1974
1975 /* Disable transmitter and interrupt. */
1976 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1977 continue;
1978
1979 reg32 |= 0x3000;
1980 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1981
1982 /* Set full-duplex, 1000 Mbps. */
1983 tg3_writephy(tp, MII_BMCR,
1984 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1985
1986 /* Set to master mode. */
1987 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1988 continue;
1989
1990 tg3_writephy(tp, MII_TG3_CTRL,
1991 (MII_TG3_CTRL_AS_MASTER |
1992 MII_TG3_CTRL_ENABLE_AS_MASTER));
1993
1994 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
1995 if (err)
1996 return err;
1997
1998 /* Block the PHY control access. */
1999 tg3_phydsp_write(tp, 0x8005, 0x0800);
2000
2001 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2002 if (!err)
2003 break;
2004 } while (--retries);
2005
2006 err = tg3_phy_reset_chanpat(tp);
2007 if (err)
2008 return err;
2009
2010 tg3_phydsp_write(tp, 0x8005, 0x0000);
2011
2012 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2013 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2014
2015 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2016
2017 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
2018
2019 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2020 reg32 &= ~0x3000;
2021 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2022 } else if (!err)
2023 err = -EBUSY;
2024
2025 return err;
2026 }
2027
2028 /* Reset the tigon3 PHY and apply all chip- and PHY-specific
2029  * workarounds that must follow a reset.
2030  */
2031 static int tg3_phy_reset(struct tg3 *tp)
2032 {
2033 u32 val, cpmuctrl;
2034 int err;
2035
2036 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2037 val = tr32(GRC_MISC_CFG);
2038 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2039 udelay(40);
2040 }
2041 err = tg3_readphy(tp, MII_BMSR, &val);
2042 err |= tg3_readphy(tp, MII_BMSR, &val);
2043 if (err != 0)
2044 return -EBUSY;
2045
2046 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2047 netif_carrier_off(tp->dev);
2048 tg3_link_report(tp);
2049 }
2050
2051 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2052 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2053 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2054 err = tg3_phy_reset_5703_4_5(tp);
2055 if (err)
2056 return err;
2057 goto out;
2058 }
2059
2060 cpmuctrl = 0;
2061 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2062 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2063 cpmuctrl = tr32(TG3_CPMU_CTRL);
2064 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2065 tw32(TG3_CPMU_CTRL,
2066 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2067 }
2068
2069 err = tg3_bmcr_reset(tp);
2070 if (err)
2071 return err;
2072
2073 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2074 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2075 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2076
2077 tw32(TG3_CPMU_CTRL, cpmuctrl);
2078 }
2079
2080 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2081 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2082 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2083 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2084 CPMU_LSPD_1000MB_MACCLK_12_5) {
2085 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2086 udelay(40);
2087 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2088 }
2089 }
2090
2091 if (tg3_flag(tp, 5717_PLUS) &&
2092 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2093 return 0;
2094
2095 tg3_phy_apply_otp(tp);
2096
2097 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2098 tg3_phy_toggle_apd(tp, true);
2099 else
2100 tg3_phy_toggle_apd(tp, false);
2101
2102 out:
2103 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2104 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2105 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2106 tg3_phydsp_write(tp, 0x000a, 0x0323);
2107 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2108 }
2109
2110 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2111 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2112 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2113 }
2114
2115 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2116 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2117 tg3_phydsp_write(tp, 0x000a, 0x310b);
2118 tg3_phydsp_write(tp, 0x201f, 0x9506);
2119 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2120 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2121 }
2122 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2123 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2124 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2125 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2126 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2127 tg3_writephy(tp, MII_TG3_TEST1,
2128 MII_TG3_TEST1_TRIM_EN | 0x4);
2129 } else
2130 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2131
2132 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2133 }
2134 }
2135
2136 /* Set the extended packet length bit (bit 14) on all chips
2137  * that support jumbo frames. */
2138 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2139 /* Cannot do read-modify-write on 5401 */
2140 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2141 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2142 /* Set bit 14 with read-modify-write to preserve other bits */
2143 err = tg3_phy_auxctl_read(tp,
2144 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2145 if (!err)
2146 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2147 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2148 }
2149
2150 /* Set PHY register 0x10 bit 0 (high FIFO elasticity) to support
2151  * jumbo frame transmission.
2152  */
2153 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2154 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2155 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2156 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2157 }
2158
2159 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2160 /* adjust output voltage */
2161 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2162 }
2163
2164 tg3_phy_toggle_automdix(tp, 1);
2165 tg3_phy_set_wirespeed(tp);
2166 return 0;
2167 }
2168
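/* Drive the GPIOs that gate the board's auxiliary (Vaux) power.
 * Vaux must stay available when this port, or its peer on a
 * dual-port board, needs WOL or ASF; otherwise the GPIOs are
 * returned to their idle state.  The exact GRC_LCLCTRL_GPIO_*
 * sequence is chip-specific, hence the per-ASIC branches below.
 */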
2169 static void tg3_frob_aux_power(struct tg3 *tp)
2170 {
2171 bool need_vaux = false;
2172
2173 /* The GPIOs do something completely different on 57765. */
2174 if (!tg3_flag(tp, IS_NIC) ||
2175 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2176 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2177 return;
2178
2179 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2180 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2181 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2182 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) &&
2183 tp->pdev_peer != tp->pdev) {
2184 struct net_device *dev_peer;
2185
2186 dev_peer = pci_get_drvdata(tp->pdev_peer);
2187
2188 /* remove_one() may have been run on the peer. */
2189 if (dev_peer) {
2190 struct tg3 *tp_peer = netdev_priv(dev_peer);
2191
2192 if (tg3_flag(tp_peer, INIT_COMPLETE))
2193 return;
2194
2195 if (tg3_flag(tp_peer, WOL_ENABLE) ||
2196 tg3_flag(tp_peer, ENABLE_ASF))
2197 need_vaux = true;
2198 }
2199 }
2200
2201 if (tg3_flag(tp, WOL_ENABLE) || tg3_flag(tp, ENABLE_ASF))
2202 need_vaux = true;
2203
2204 if (need_vaux) {
2205 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2206 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2207 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2208 (GRC_LCLCTRL_GPIO_OE0 |
2209 GRC_LCLCTRL_GPIO_OE1 |
2210 GRC_LCLCTRL_GPIO_OE2 |
2211 GRC_LCLCTRL_GPIO_OUTPUT0 |
2212 GRC_LCLCTRL_GPIO_OUTPUT1),
2213 100);
2214 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2215 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2216 /* The 5761 non-E device swaps GPIO 0 and GPIO 2. */
2217 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2218 GRC_LCLCTRL_GPIO_OE1 |
2219 GRC_LCLCTRL_GPIO_OE2 |
2220 GRC_LCLCTRL_GPIO_OUTPUT0 |
2221 GRC_LCLCTRL_GPIO_OUTPUT1 |
2222 tp->grc_local_ctrl;
2223 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2224
2225 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2226 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2227
2228 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2229 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2230 } else {
2231 u32 no_gpio2;
2232 u32 grc_local_ctrl = 0;
2233
2234 /* Workaround to prevent overdrawing Amps. */
2235 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2236 ASIC_REV_5714) {
2237 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2238 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2239 grc_local_ctrl, 100);
2240 }
2241
2242 /* On 5753 and variants, GPIO2 cannot be used. */
2243 no_gpio2 = tp->nic_sram_data_cfg &
2244 NIC_SRAM_DATA_CFG_NO_GPIO2;
2245
2246 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2247 GRC_LCLCTRL_GPIO_OE1 |
2248 GRC_LCLCTRL_GPIO_OE2 |
2249 GRC_LCLCTRL_GPIO_OUTPUT1 |
2250 GRC_LCLCTRL_GPIO_OUTPUT2;
2251 if (no_gpio2) {
2252 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2253 GRC_LCLCTRL_GPIO_OUTPUT2);
2254 }
2255 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2256 grc_local_ctrl, 100);
2257
2258 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2259
2260 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2261 grc_local_ctrl, 100);
2262
2263 if (!no_gpio2) {
2264 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2265 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2266 grc_local_ctrl, 100);
2267 }
2268 }
2269 } else {
2270 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2271 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2272 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2273 (GRC_LCLCTRL_GPIO_OE1 |
2274 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2275
2276 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2277 GRC_LCLCTRL_GPIO_OE1, 100);
2278
2279 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2280 (GRC_LCLCTRL_GPIO_OE1 |
2281 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2282 }
2283 }
2284 }
2285
2286 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2287 {
2288 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2289 return 1;
2290 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2291 if (speed != SPEED_10)
2292 return 1;
2293 } else if (speed == SPEED_10)
2294 return 1;
2295
2296 return 0;
2297 }
2298
2299 static int tg3_setup_phy(struct tg3 *, int);
2300
2301 #define RESET_KIND_SHUTDOWN 0
2302 #define RESET_KIND_INIT 1
2303 #define RESET_KIND_SUSPEND 2
2304
2305 static void tg3_write_sig_post_reset(struct tg3 *, int);
2306 static int tg3_halt_cpu(struct tg3 *, u32);
2307
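/* Put the PHY into its lowest-power state for this chip.  Serdes
 * parts and the 5906 EPHY have dedicated power-down paths, FET-style
 * PHYs use the shadow AUXMODE4 power-down bit, and everything else
 * ends in BMCR_PDOWN, except on chips whose errata forbid powering
 * the PHY down at all.
 */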
2308 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2309 {
2310 u32 val;
2311
2312 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2313 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2314 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2315 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2316
2317 sg_dig_ctrl |=
2318 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2319 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2320 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2321 }
2322 return;
2323 }
2324
2325 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2326 tg3_bmcr_reset(tp);
2327 val = tr32(GRC_MISC_CFG);
2328 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2329 udelay(40);
2330 return;
2331 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2332 u32 phytest;
2333 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2334 u32 phy;
2335
2336 tg3_writephy(tp, MII_ADVERTISE, 0);
2337 tg3_writephy(tp, MII_BMCR,
2338 BMCR_ANENABLE | BMCR_ANRESTART);
2339
2340 tg3_writephy(tp, MII_TG3_FET_TEST,
2341 phytest | MII_TG3_FET_SHADOW_EN);
2342 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2343 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2344 tg3_writephy(tp,
2345 MII_TG3_FET_SHDW_AUXMODE4,
2346 phy);
2347 }
2348 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2349 }
2350 return;
2351 } else if (do_low_power) {
2352 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2353 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2354
2355 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2356 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2357 MII_TG3_AUXCTL_PCTL_VREG_11V;
2358 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2359 }
2360
2361 /* On some chips the PHY must not be powered down,
2362  * because of hardware bugs.
2363  */
2364 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2365 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2366 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2367 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2368 return;
2369
2370 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2371 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2372 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2373 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2374 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2375 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2376 }
2377
2378 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2379 }
2380
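/* NVRAM software arbitration: request the grant via SWARB_REQ_SET1
 * and spin for SWARB_GNT1 (up to 8000 polls of 20us, roughly 160ms).
 * nvram_lock_cnt makes the lock recursive, so nested callers do not
 * re-arbitrate.
 */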
2381 /* tp->lock is held. */
2382 static int tg3_nvram_lock(struct tg3 *tp)
2383 {
2384 if (tg3_flag(tp, NVRAM)) {
2385 int i;
2386
2387 if (tp->nvram_lock_cnt == 0) {
2388 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2389 for (i = 0; i < 8000; i++) {
2390 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2391 break;
2392 udelay(20);
2393 }
2394 if (i == 8000) {
2395 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2396 return -ENODEV;
2397 }
2398 }
2399 tp->nvram_lock_cnt++;
2400 }
2401 return 0;
2402 }
2403
2404 /* tp->lock is held. */
2405 static void tg3_nvram_unlock(struct tg3 *tp)
2406 {
2407 if (tg3_flag(tp, NVRAM)) {
2408 if (tp->nvram_lock_cnt > 0)
2409 tp->nvram_lock_cnt--;
2410 if (tp->nvram_lock_cnt == 0)
2411 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2412 }
2413 }
2414
2415 /* tp->lock is held. */
2416 static void tg3_enable_nvram_access(struct tg3 *tp)
2417 {
2418 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2419 u32 nvaccess = tr32(NVRAM_ACCESS);
2420
2421 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2422 }
2423 }
2424
2425 /* tp->lock is held. */
2426 static void tg3_disable_nvram_access(struct tg3 *tp)
2427 {
2428 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2429 u32 nvaccess = tr32(NVRAM_ACCESS);
2430
2431 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2432 }
2433 }
2434
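/* Fallback read path for boards whose NVRAM is a plain EEPROM behind
 * the GRC block rather than the NVRAM interface: program the address
 * into GRC_EEPROM_ADDR, poll for EEPROM_ADDR_COMPLETE (up to about
 * one second), then fetch the word from GRC_EEPROM_DATA.
 */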
2435 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2436 u32 offset, u32 *val)
2437 {
2438 u32 tmp;
2439 int i;
2440
2441 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2442 return -EINVAL;
2443
2444 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2445 EEPROM_ADDR_DEVID_MASK |
2446 EEPROM_ADDR_READ);
2447 tw32(GRC_EEPROM_ADDR,
2448 tmp |
2449 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2450 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2451 EEPROM_ADDR_ADDR_MASK) |
2452 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2453
2454 for (i = 0; i < 1000; i++) {
2455 tmp = tr32(GRC_EEPROM_ADDR);
2456
2457 if (tmp & EEPROM_ADDR_COMPLETE)
2458 break;
2459 msleep(1);
2460 }
2461 if (!(tmp & EEPROM_ADDR_COMPLETE))
2462 return -EBUSY;
2463
2464 tmp = tr32(GRC_EEPROM_DATA);
2465
2466 /*
2467  * The data always comes back opposite the native endian
2468  * format; perform a blind byteswap to compensate.
2469  */
2470 *val = swab32(tmp);
2471
2472 return 0;
2473 }
2474
2475 #define NVRAM_CMD_TIMEOUT 10000
2476
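/* Kick a command into NVRAM_CMD and poll for NVRAM_CMD_DONE every
 * 10us, giving up after NVRAM_CMD_TIMEOUT polls (roughly 100ms).
 */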
2477 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2478 {
2479 int i;
2480
2481 tw32(NVRAM_CMD, nvram_cmd);
2482 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2483 udelay(10);
2484 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2485 udelay(10);
2486 break;
2487 }
2488 }
2489
2490 if (i == NVRAM_CMD_TIMEOUT)
2491 return -EBUSY;
2492
2493 return 0;
2494 }
2495
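/* Translate a linear NVRAM offset into the page:byte form used by
 * Atmel AT45DB0x1B-style flashes, whose page size is not a power of
 * two.  A worked example, assuming the 264-byte pages of that family
 * (with ATMEL_AT45DB0X1B_PAGE_POS as the 9-bit byte-offset width):
 * addr = 600 gives page 600 / 264 = 2 and offset 600 % 264 = 72,
 * so the translated address is (2 << 9) + 72 = 1096.
 */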
2496 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2497 {
2498 if (tg3_flag(tp, NVRAM) &&
2499 tg3_flag(tp, NVRAM_BUFFERED) &&
2500 tg3_flag(tp, FLASH) &&
2501 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2502 (tp->nvram_jedecnum == JEDEC_ATMEL))
2503
2504 addr = ((addr / tp->nvram_pagesize) <<
2505 ATMEL_AT45DB0X1B_PAGE_POS) +
2506 (addr % tp->nvram_pagesize);
2507
2508 return addr;
2509 }
2510
2511 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2512 {
2513 if (tg3_flag(tp, NVRAM) &&
2514 tg3_flag(tp, NVRAM_BUFFERED) &&
2515 tg3_flag(tp, FLASH) &&
2516 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2517 (tp->nvram_jedecnum == JEDEC_ATMEL))
2518
2519 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2520 tp->nvram_pagesize) +
2521 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2522
2523 return addr;
2524 }
2525
2526 /* NOTE: Data read in from NVRAM is byteswapped according to
2527 * the byteswapping settings for all other register accesses.
2528 * tg3 devices are BE devices, so on a BE machine, the data
2529 * returned will be exactly as it is seen in NVRAM. On a LE
2530 * machine, the 32-bit value will be byteswapped.
2531 */
2532 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2533 {
2534 int ret;
2535
2536 if (!tg3_flag(tp, NVRAM))
2537 return tg3_nvram_read_using_eeprom(tp, offset, val);
2538
2539 offset = tg3_nvram_phys_addr(tp, offset);
2540
2541 if (offset > NVRAM_ADDR_MSK)
2542 return -EINVAL;
2543
2544 ret = tg3_nvram_lock(tp);
2545 if (ret)
2546 return ret;
2547
2548 tg3_enable_nvram_access(tp);
2549
2550 tw32(NVRAM_ADDR, offset);
2551 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2552 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2553
2554 if (ret == 0)
2555 *val = tr32(NVRAM_RDDATA);
2556
2557 tg3_disable_nvram_access(tp);
2558
2559 tg3_nvram_unlock(tp);
2560
2561 return ret;
2562 }
2563
2564 /* Ensures NVRAM data is in bytestream format. */
2565 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2566 {
2567 u32 v;
2568 int res = tg3_nvram_read(tp, offset, &v);
2569 if (!res)
2570 *val = cpu_to_be32(v);
2571 return res;
2572 }
2573
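/* Program the station address into the MAC.  The six bytes are split
 * into a 16-bit high half (bytes 0-1) and a 32-bit low half (bytes
 * 2-5); for example, 00:10:18:aa:bb:cc yields addr_high = 0x0010 and
 * addr_low = 0x18aabbcc, replicated across the four (or, on
 * 5703/5704, sixteen) MAC address slots.
 */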
2574 /* tp->lock is held. */
2575 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2576 {
2577 u32 addr_high, addr_low;
2578 int i;
2579
2580 addr_high = ((tp->dev->dev_addr[0] << 8) |
2581 tp->dev->dev_addr[1]);
2582 addr_low = ((tp->dev->dev_addr[2] << 24) |
2583 (tp->dev->dev_addr[3] << 16) |
2584 (tp->dev->dev_addr[4] << 8) |
2585 (tp->dev->dev_addr[5] << 0));
2586 for (i = 0; i < 4; i++) {
2587 if (i == 1 && skip_mac_1)
2588 continue;
2589 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2590 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2591 }
2592
2593 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2594 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2595 for (i = 0; i < 12; i++) {
2596 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2597 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2598 }
2599 }
2600
2601 addr_high = (tp->dev->dev_addr[0] +
2602 tp->dev->dev_addr[1] +
2603 tp->dev->dev_addr[2] +
2604 tp->dev->dev_addr[3] +
2605 tp->dev->dev_addr[4] +
2606 tp->dev->dev_addr[5]) &
2607 TX_BACKOFF_SEED_MASK;
2608 tw32(MAC_TX_BACKOFF_SEED, addr_high);
2609 }
2610
2611 static void tg3_enable_register_access(struct tg3 *tp)
2612 {
2613 /*
2614 * Make sure register accesses (indirect or otherwise) will function
2615 * correctly.
2616 */
2617 pci_write_config_dword(tp->pdev,
2618 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
2619 }
2620
2621 static int tg3_power_up(struct tg3 *tp)
2622 {
2623 tg3_enable_register_access(tp);
2624
2625 pci_set_power_state(tp->pdev, PCI_D0);
2626
2627 /* Switch out of Vaux if it is a NIC */
2628 if (tg3_flag(tp, IS_NIC))
2629 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2630
2631 return 0;
2632 }
2633
2634 static int tg3_power_down_prepare(struct tg3 *tp)
2635 {
2636 u32 misc_host_ctrl;
2637 bool device_should_wake, do_low_power;
2638
2639 tg3_enable_register_access(tp);
2640
2641 /* Restore the CLKREQ setting. */
2642 if (tg3_flag(tp, CLKREQ_BUG)) {
2643 u16 lnkctl;
2644
2645 pci_read_config_word(tp->pdev,
2646 tp->pcie_cap + PCI_EXP_LNKCTL,
2647 &lnkctl);
2648 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2649 pci_write_config_word(tp->pdev,
2650 tp->pcie_cap + PCI_EXP_LNKCTL,
2651 lnkctl);
2652 }
2653
2654 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2655 tw32(TG3PCI_MISC_HOST_CTRL,
2656 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2657
2658 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2659 tg3_flag(tp, WOL_ENABLE);
2660
2661 if (tg3_flag(tp, USE_PHYLIB)) {
2662 do_low_power = false;
2663 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2664 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2665 struct phy_device *phydev;
2666 u32 phyid, advertising;
2667
2668 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2669
2670 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2671
2672 tp->link_config.orig_speed = phydev->speed;
2673 tp->link_config.orig_duplex = phydev->duplex;
2674 tp->link_config.orig_autoneg = phydev->autoneg;
2675 tp->link_config.orig_advertising = phydev->advertising;
2676
2677 advertising = ADVERTISED_TP |
2678 ADVERTISED_Pause |
2679 ADVERTISED_Autoneg |
2680 ADVERTISED_10baseT_Half;
2681
2682 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
2683 if (tg3_flag(tp, WOL_SPEED_100MB))
2684 advertising |=
2685 ADVERTISED_100baseT_Half |
2686 ADVERTISED_100baseT_Full |
2687 ADVERTISED_10baseT_Full;
2688 else
2689 advertising |= ADVERTISED_10baseT_Full;
2690 }
2691
2692 phydev->advertising = advertising;
2693
2694 phy_start_aneg(phydev);
2695
2696 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2697 if (phyid != PHY_ID_BCMAC131) {
2698 phyid &= PHY_BCM_OUI_MASK;
2699 if (phyid == PHY_BCM_OUI_1 ||
2700 phyid == PHY_BCM_OUI_2 ||
2701 phyid == PHY_BCM_OUI_3)
2702 do_low_power = true;
2703 }
2704 }
2705 } else {
2706 do_low_power = true;
2707
2708 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2709 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2710 tp->link_config.orig_speed = tp->link_config.speed;
2711 tp->link_config.orig_duplex = tp->link_config.duplex;
2712 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2713 }
2714
2715 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2716 tp->link_config.speed = SPEED_10;
2717 tp->link_config.duplex = DUPLEX_HALF;
2718 tp->link_config.autoneg = AUTONEG_ENABLE;
2719 tg3_setup_phy(tp, 0);
2720 }
2721 }
2722
2723 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2724 u32 val;
2725
2726 val = tr32(GRC_VCPU_EXT_CTRL);
2727 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2728 } else if (!tg3_flag(tp, ENABLE_ASF)) {
2729 int i;
2730 u32 val;
2731
2732 for (i = 0; i < 200; i++) {
2733 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2734 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2735 break;
2736 msleep(1);
2737 }
2738 }
2739 if (tg3_flag(tp, WOL_CAP))
2740 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2741 WOL_DRV_STATE_SHUTDOWN |
2742 WOL_DRV_WOL |
2743 WOL_SET_MAGIC_PKT);
2744
2745 if (device_should_wake) {
2746 u32 mac_mode;
2747
2748 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2749 if (do_low_power &&
2750 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2751 tg3_phy_auxctl_write(tp,
2752 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
2753 MII_TG3_AUXCTL_PCTL_WOL_EN |
2754 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2755 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
2756 udelay(40);
2757 }
2758
2759 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2760 mac_mode = MAC_MODE_PORT_MODE_GMII;
2761 else
2762 mac_mode = MAC_MODE_PORT_MODE_MII;
2763
2764 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2765 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2766 ASIC_REV_5700) {
2767 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
2768 SPEED_100 : SPEED_10;
2769 if (tg3_5700_link_polarity(tp, speed))
2770 mac_mode |= MAC_MODE_LINK_POLARITY;
2771 else
2772 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2773 }
2774 } else {
2775 mac_mode = MAC_MODE_PORT_MODE_TBI;
2776 }
2777
2778 if (!tg3_flag(tp, 5750_PLUS))
2779 tw32(MAC_LED_CTRL, tp->led_ctrl);
2780
2781 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2782 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
2783 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
2784 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2785
2786 if (tg3_flag(tp, ENABLE_APE))
2787 mac_mode |= MAC_MODE_APE_TX_EN |
2788 MAC_MODE_APE_RX_EN |
2789 MAC_MODE_TDE_ENABLE;
2790
2791 tw32_f(MAC_MODE, mac_mode);
2792 udelay(100);
2793
2794 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2795 udelay(10);
2796 }
2797
2798 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
2799 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2800 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2801 u32 base_val;
2802
2803 base_val = tp->pci_clock_ctrl;
2804 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2805 CLOCK_CTRL_TXCLK_DISABLE);
2806
2807 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2808 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2809 } else if (tg3_flag(tp, 5780_CLASS) ||
2810 tg3_flag(tp, CPMU_PRESENT) ||
2811 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2812 /* do nothing */
2813 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
2814 u32 newbits1, newbits2;
2815
2816 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2817 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2818 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2819 CLOCK_CTRL_TXCLK_DISABLE |
2820 CLOCK_CTRL_ALTCLK);
2821 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2822 } else if (tg3_flag(tp, 5705_PLUS)) {
2823 newbits1 = CLOCK_CTRL_625_CORE;
2824 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2825 } else {
2826 newbits1 = CLOCK_CTRL_ALTCLK;
2827 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2828 }
2829
2830 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2831 40);
2832
2833 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2834 40);
2835
2836 if (!tg3_flag(tp, 5705_PLUS)) {
2837 u32 newbits3;
2838
2839 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2840 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2841 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2842 CLOCK_CTRL_TXCLK_DISABLE |
2843 CLOCK_CTRL_44MHZ_CORE);
2844 } else {
2845 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2846 }
2847
2848 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2849 tp->pci_clock_ctrl | newbits3, 40);
2850 }
2851 }
2852
2853 if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
2854 tg3_power_down_phy(tp, do_low_power);
2855
2856 tg3_frob_aux_power(tp);
2857
2858 /* Workaround for unstable PLL clock */
2859 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2860 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2861 u32 val = tr32(0x7d00);
2862
2863 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2864 tw32(0x7d00, val);
2865 if (!tg3_flag(tp, ENABLE_ASF)) {
2866 int err;
2867
2868 err = tg3_nvram_lock(tp);
2869 tg3_halt_cpu(tp, RX_CPU_BASE);
2870 if (!err)
2871 tg3_nvram_unlock(tp);
2872 }
2873 }
2874
2875 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2876
2877 return 0;
2878 }
2879
2880 static void tg3_power_down(struct tg3 *tp)
2881 {
2882 tg3_power_down_prepare(tp);
2883
2884 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
2885 pci_set_power_state(tp->pdev, PCI_D3hot);
2886 }
2887
2888 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2889 {
2890 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2891 case MII_TG3_AUX_STAT_10HALF:
2892 *speed = SPEED_10;
2893 *duplex = DUPLEX_HALF;
2894 break;
2895
2896 case MII_TG3_AUX_STAT_10FULL:
2897 *speed = SPEED_10;
2898 *duplex = DUPLEX_FULL;
2899 break;
2900
2901 case MII_TG3_AUX_STAT_100HALF:
2902 *speed = SPEED_100;
2903 *duplex = DUPLEX_HALF;
2904 break;
2905
2906 case MII_TG3_AUX_STAT_100FULL:
2907 *speed = SPEED_100;
2908 *duplex = DUPLEX_FULL;
2909 break;
2910
2911 case MII_TG3_AUX_STAT_1000HALF:
2912 *speed = SPEED_1000;
2913 *duplex = DUPLEX_HALF;
2914 break;
2915
2916 case MII_TG3_AUX_STAT_1000FULL:
2917 *speed = SPEED_1000;
2918 *duplex = DUPLEX_FULL;
2919 break;
2920
2921 default:
2922 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2923 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2924 SPEED_10;
2925 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2926 DUPLEX_HALF;
2927 break;
2928 }
2929 *speed = SPEED_INVALID;
2930 *duplex = DUPLEX_INVALID;
2931 break;
2932 }
2933 }
2934
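/* Translate an ethtool-style ADVERTISED_* mask into the MII
 * advertisement registers: 10/100 modes and flow control go into
 * MII_ADVERTISE, 1000BASE-T modes into MII_TG3_CTRL, and, where the
 * PHY supports it, EEE abilities into the clause 45 MDIO_AN_EEE_ADV
 * register.
 */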
2935 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
2936 {
2937 int err = 0;
2938 u32 val, new_adv;
2939
2940 new_adv = ADVERTISE_CSMA;
2941 if (advertise & ADVERTISED_10baseT_Half)
2942 new_adv |= ADVERTISE_10HALF;
2943 if (advertise & ADVERTISED_10baseT_Full)
2944 new_adv |= ADVERTISE_10FULL;
2945 if (advertise & ADVERTISED_100baseT_Half)
2946 new_adv |= ADVERTISE_100HALF;
2947 if (advertise & ADVERTISED_100baseT_Full)
2948 new_adv |= ADVERTISE_100FULL;
2949
2950 new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
2951
2952 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
2953 if (err)
2954 goto done;
2955
2956 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
2957 goto done;
2958
2959 new_adv = 0;
2960 if (advertise & ADVERTISED_1000baseT_Half)
2961 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2962 if (advertise & ADVERTISED_1000baseT_Full)
2963 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2964
2965 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2966 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2967 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2968 MII_TG3_CTRL_ENABLE_AS_MASTER);
2969
2970 err = tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2971 if (err)
2972 goto done;
2973
2974 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2975 goto done;
2976
2977 tw32(TG3_CPMU_EEE_MODE,
2978 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2979
2980 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2981 if (!err) {
2982 u32 err2;
2983
2984 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
2985 case ASIC_REV_5717:
2986 case ASIC_REV_57765:
2987 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
2988 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
2989 MII_TG3_DSP_CH34TP2_HIBW01);
2990 /* Fall through */
2991 case ASIC_REV_5719:
2992 val = MII_TG3_DSP_TAP26_ALNOKO |
2993 MII_TG3_DSP_TAP26_RMRXSTO |
2994 MII_TG3_DSP_TAP26_OPCSINPT;
2995 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2996 }
2997
2998 val = 0;
2999 /* Advertise 100-BaseTX EEE ability */
3000 if (advertise & ADVERTISED_100baseT_Full)
3001 val |= MDIO_AN_EEE_ADV_100TX;
3002 /* Advertise 1000-BaseT EEE ability */
3003 if (advertise & ADVERTISED_1000baseT_Full)
3004 val |= MDIO_AN_EEE_ADV_1000T;
3005 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3006
3007 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3008 if (!err)
3009 err = err2;
3010 }
3011
3012 done:
3013 return err;
3014 }
3015
3016 static void tg3_phy_copper_begin(struct tg3 *tp)
3017 {
3018 u32 new_adv;
3019 int i;
3020
3021 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3022 new_adv = ADVERTISED_10baseT_Half |
3023 ADVERTISED_10baseT_Full;
3024 if (tg3_flag(tp, WOL_SPEED_100MB))
3025 new_adv |= ADVERTISED_100baseT_Half |
3026 ADVERTISED_100baseT_Full;
3027
3028 tg3_phy_autoneg_cfg(tp, new_adv,
3029 FLOW_CTRL_TX | FLOW_CTRL_RX);
3030 } else if (tp->link_config.speed == SPEED_INVALID) {
3031 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3032 tp->link_config.advertising &=
3033 ~(ADVERTISED_1000baseT_Half |
3034 ADVERTISED_1000baseT_Full);
3035
3036 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3037 tp->link_config.flowctrl);
3038 } else {
3039 /* Asking for a specific link mode. */
3040 if (tp->link_config.speed == SPEED_1000) {
3041 if (tp->link_config.duplex == DUPLEX_FULL)
3042 new_adv = ADVERTISED_1000baseT_Full;
3043 else
3044 new_adv = ADVERTISED_1000baseT_Half;
3045 } else if (tp->link_config.speed == SPEED_100) {
3046 if (tp->link_config.duplex == DUPLEX_FULL)
3047 new_adv = ADVERTISED_100baseT_Full;
3048 else
3049 new_adv = ADVERTISED_100baseT_Half;
3050 } else {
3051 if (tp->link_config.duplex == DUPLEX_FULL)
3052 new_adv = ADVERTISED_10baseT_Full;
3053 else
3054 new_adv = ADVERTISED_10baseT_Half;
3055 }
3056
3057 tg3_phy_autoneg_cfg(tp, new_adv,
3058 tp->link_config.flowctrl);
3059 }
3060
3061 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3062 tp->link_config.speed != SPEED_INVALID) {
3063 u32 bmcr, orig_bmcr;
3064
3065 tp->link_config.active_speed = tp->link_config.speed;
3066 tp->link_config.active_duplex = tp->link_config.duplex;
3067
3068 bmcr = 0;
3069 switch (tp->link_config.speed) {
3070 default:
3071 case SPEED_10:
3072 break;
3073
3074 case SPEED_100:
3075 bmcr |= BMCR_SPEED100;
3076 break;
3077
3078 case SPEED_1000:
3079 bmcr |= TG3_BMCR_SPEED1000;
3080 break;
3081 }
3082
3083 if (tp->link_config.duplex == DUPLEX_FULL)
3084 bmcr |= BMCR_FULLDPLX;
3085
3086 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3087 (bmcr != orig_bmcr)) {
3088 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3089 for (i = 0; i < 1500; i++) {
3090 u32 tmp;
3091
3092 udelay(10);
3093 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3094 tg3_readphy(tp, MII_BMSR, &tmp))
3095 continue;
3096 if (!(tmp & BMSR_LSTATUS)) {
3097 udelay(40);
3098 break;
3099 }
3100 }
3101 tg3_writephy(tp, MII_BMCR, bmcr);
3102 udelay(40);
3103 }
3104 } else {
3105 tg3_writephy(tp, MII_BMCR,
3106 BMCR_ANENABLE | BMCR_ANRESTART);
3107 }
3108 }
3109
3110 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3111 {
3112 int err;
3113
3114 /* Turn off tap power management and set the
3115  * extended packet length bit. */
3116 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3117
3118 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3119 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3120 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3121 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3122 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3123
3124 udelay(40);
3125
3126 return err;
3127 }
3128
3129 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3130 {
3131 u32 adv_reg, all_mask = 0;
3132
3133 if (mask & ADVERTISED_10baseT_Half)
3134 all_mask |= ADVERTISE_10HALF;
3135 if (mask & ADVERTISED_10baseT_Full)
3136 all_mask |= ADVERTISE_10FULL;
3137 if (mask & ADVERTISED_100baseT_Half)
3138 all_mask |= ADVERTISE_100HALF;
3139 if (mask & ADVERTISED_100baseT_Full)
3140 all_mask |= ADVERTISE_100FULL;
3141
3142 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3143 return 0;
3144
3145 if ((adv_reg & all_mask) != all_mask)
3146 return 0;
3147 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3148 u32 tg3_ctrl;
3149
3150 all_mask = 0;
3151 if (mask & ADVERTISED_1000baseT_Half)
3152 all_mask |= ADVERTISE_1000HALF;
3153 if (mask & ADVERTISED_1000baseT_Full)
3154 all_mask |= ADVERTISE_1000FULL;
3155
3156 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
3157 return 0;
3158
3159 if ((tg3_ctrl & all_mask) != all_mask)
3160 return 0;
3161 }
3162 return 1;
3163 }
3164
3165 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3166 {
3167 u32 curadv, reqadv;
3168
3169 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3170 return 1;
3171
3172 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3173 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3174
3175 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3176 if (curadv != reqadv)
3177 return 0;
3178
3179 if (tg3_flag(tp, PAUSE_AUTONEG))
3180 tg3_readphy(tp, MII_LPA, rmtadv);
3181 } else {
3182 /* Reprogram the advertisement register, even if it
3183 * does not affect the current link. If the link
3184 * gets renegotiated in the future, we can save an
3185 * additional renegotiation cycle by advertising
3186 * it correctly in the first place.
3187 */
3188 if (curadv != reqadv) {
3189 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3190 ADVERTISE_PAUSE_ASYM);
3191 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3192 }
3193 }
3194
3195 return 1;
3196 }
3197
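/* Bring up (or re-verify) the link on a copper PHY: clear stale MAC
 * status, apply per-chip PHY workarounds, wait for BMSR to report
 * link, decode speed/duplex from the aux status register, reconcile
 * flow control, then program MAC_MODE and carrier state to match.
 */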
3198 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3199 {
3200 int current_link_up;
3201 u32 bmsr, val;
3202 u32 lcl_adv, rmt_adv;
3203 u16 current_speed;
3204 u8 current_duplex;
3205 int i, err;
3206
3207 tw32(MAC_EVENT, 0);
3208
3209 tw32_f(MAC_STATUS,
3210 (MAC_STATUS_SYNC_CHANGED |
3211 MAC_STATUS_CFG_CHANGED |
3212 MAC_STATUS_MI_COMPLETION |
3213 MAC_STATUS_LNKSTATE_CHANGED));
3214 udelay(40);
3215
3216 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3217 tw32_f(MAC_MI_MODE,
3218 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3219 udelay(80);
3220 }
3221
3222 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3223
3224 /* Some third-party PHYs need to be reset on link going
3225 * down.
3226 */
3227 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3228 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3229 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3230 netif_carrier_ok(tp->dev)) {
3231 tg3_readphy(tp, MII_BMSR, &bmsr);
3232 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3233 !(bmsr & BMSR_LSTATUS))
3234 force_reset = 1;
3235 }
3236 if (force_reset)
3237 tg3_phy_reset(tp);
3238
3239 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3240 tg3_readphy(tp, MII_BMSR, &bmsr);
3241 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3242 !tg3_flag(tp, INIT_COMPLETE))
3243 bmsr = 0;
3244
3245 if (!(bmsr & BMSR_LSTATUS)) {
3246 err = tg3_init_5401phy_dsp(tp);
3247 if (err)
3248 return err;
3249
3250 tg3_readphy(tp, MII_BMSR, &bmsr);
3251 for (i = 0; i < 1000; i++) {
3252 udelay(10);
3253 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3254 (bmsr & BMSR_LSTATUS)) {
3255 udelay(40);
3256 break;
3257 }
3258 }
3259
3260 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3261 TG3_PHY_REV_BCM5401_B0 &&
3262 !(bmsr & BMSR_LSTATUS) &&
3263 tp->link_config.active_speed == SPEED_1000) {
3264 err = tg3_phy_reset(tp);
3265 if (!err)
3266 err = tg3_init_5401phy_dsp(tp);
3267 if (err)
3268 return err;
3269 }
3270 }
3271 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3272 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3273 /* 5701 {A0,B0} CRC bug workaround */
3274 tg3_writephy(tp, 0x15, 0x0a75);
3275 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3276 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3277 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3278 }
3279
3280 /* Clear pending interrupts... */
3281 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3282 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3283
3284 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3285 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3286 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3287 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3288
3289 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3290 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3291 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3292 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3293 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3294 else
3295 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3296 }
3297
3298 current_link_up = 0;
3299 current_speed = SPEED_INVALID;
3300 current_duplex = DUPLEX_INVALID;
3301
3302 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3303 err = tg3_phy_auxctl_read(tp,
3304 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3305 &val);
3306 if (!err && !(val & (1 << 10))) {
3307 tg3_phy_auxctl_write(tp,
3308 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3309 val | (1 << 10));
3310 goto relink;
3311 }
3312 }
3313
3314 bmsr = 0;
3315 for (i = 0; i < 100; i++) {
3316 tg3_readphy(tp, MII_BMSR, &bmsr);
3317 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3318 (bmsr & BMSR_LSTATUS))
3319 break;
3320 udelay(40);
3321 }
3322
3323 if (bmsr & BMSR_LSTATUS) {
3324 u32 aux_stat, bmcr;
3325
3326 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3327 for (i = 0; i < 2000; i++) {
3328 udelay(10);
3329 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3330 aux_stat)
3331 break;
3332 }
3333
3334 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3335 &current_speed,
3336 &current_duplex);
3337
3338 bmcr = 0;
3339 for (i = 0; i < 200; i++) {
3340 tg3_readphy(tp, MII_BMCR, &bmcr);
3341 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3342 continue;
3343 if (bmcr && bmcr != 0x7fff)
3344 break;
3345 udelay(10);
3346 }
3347
3348 lcl_adv = 0;
3349 rmt_adv = 0;
3350
3351 tp->link_config.active_speed = current_speed;
3352 tp->link_config.active_duplex = current_duplex;
3353
3354 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3355 if ((bmcr & BMCR_ANENABLE) &&
3356 tg3_copper_is_advertising_all(tp,
3357 tp->link_config.advertising)) {
3358 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3359 &rmt_adv))
3360 current_link_up = 1;
3361 }
3362 } else {
3363 if (!(bmcr & BMCR_ANENABLE) &&
3364 tp->link_config.speed == current_speed &&
3365 tp->link_config.duplex == current_duplex &&
3366 tp->link_config.flowctrl ==
3367 tp->link_config.active_flowctrl) {
3368 current_link_up = 1;
3369 }
3370 }
3371
3372 if (current_link_up == 1 &&
3373 tp->link_config.active_duplex == DUPLEX_FULL)
3374 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3375 }
3376
3377 relink:
3378 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3379 tg3_phy_copper_begin(tp);
3380
3381 tg3_readphy(tp, MII_BMSR, &bmsr);
3382 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
3383 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
3384 current_link_up = 1;
3385 }
3386
3387 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3388 if (current_link_up == 1) {
3389 if (tp->link_config.active_speed == SPEED_100 ||
3390 tp->link_config.active_speed == SPEED_10)
3391 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3392 else
3393 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3394 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3395 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3396 else
3397 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3398
3399 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3400 if (tp->link_config.active_duplex == DUPLEX_HALF)
3401 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3402
3403 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3404 if (current_link_up == 1 &&
3405 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3406 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3407 else
3408 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3409 }
3410
3411 /* Without this setting the Netgear GA302T PHY does not
3412  * send or receive packets; the reason is not understood.
3413  */
3414 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3415 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3416 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3417 tw32_f(MAC_MI_MODE, tp->mi_mode);
3418 udelay(80);
3419 }
3420
3421 tw32_f(MAC_MODE, tp->mac_mode);
3422 udelay(40);
3423
3424 tg3_phy_eee_adjust(tp, current_link_up);
3425
3426 if (tg3_flag(tp, USE_LINKCHG_REG)) {
3427 /* Polled via timer. */
3428 tw32_f(MAC_EVENT, 0);
3429 } else {
3430 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3431 }
3432 udelay(40);
3433
3434 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3435 current_link_up == 1 &&
3436 tp->link_config.active_speed == SPEED_1000 &&
3437 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
3438 udelay(120);
3439 tw32_f(MAC_STATUS,
3440 (MAC_STATUS_SYNC_CHANGED |
3441 MAC_STATUS_CFG_CHANGED));
3442 udelay(40);
3443 tg3_write_mem(tp,
3444 NIC_SRAM_FIRMWARE_MBOX,
3445 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3446 }
3447
3448 /* Prevent send BD corruption. */
3449 if (tg3_flag(tp, CLKREQ_BUG)) {
3450 u16 oldlnkctl, newlnkctl;
3451
3452 pci_read_config_word(tp->pdev,
3453 tp->pcie_cap + PCI_EXP_LNKCTL,
3454 &oldlnkctl);
3455 if (tp->link_config.active_speed == SPEED_100 ||
3456 tp->link_config.active_speed == SPEED_10)
3457 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3458 else
3459 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3460 if (newlnkctl != oldlnkctl)
3461 pci_write_config_word(tp->pdev,
3462 tp->pcie_cap + PCI_EXP_LNKCTL,
3463 newlnkctl);
3464 }
3465
3466 if (current_link_up != netif_carrier_ok(tp->dev)) {
3467 if (current_link_up)
3468 netif_carrier_on(tp->dev);
3469 else
3470 netif_carrier_off(tp->dev);
3471 tg3_link_report(tp);
3472 }
3473
3474 return 0;
3475 }
3476
3477 struct tg3_fiber_aneginfo {
3478 int state;
3479 #define ANEG_STATE_UNKNOWN 0
3480 #define ANEG_STATE_AN_ENABLE 1
3481 #define ANEG_STATE_RESTART_INIT 2
3482 #define ANEG_STATE_RESTART 3
3483 #define ANEG_STATE_DISABLE_LINK_OK 4
3484 #define ANEG_STATE_ABILITY_DETECT_INIT 5
3485 #define ANEG_STATE_ABILITY_DETECT 6
3486 #define ANEG_STATE_ACK_DETECT_INIT 7
3487 #define ANEG_STATE_ACK_DETECT 8
3488 #define ANEG_STATE_COMPLETE_ACK_INIT 9
3489 #define ANEG_STATE_COMPLETE_ACK 10
3490 #define ANEG_STATE_IDLE_DETECT_INIT 11
3491 #define ANEG_STATE_IDLE_DETECT 12
3492 #define ANEG_STATE_LINK_OK 13
3493 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3494 #define ANEG_STATE_NEXT_PAGE_WAIT 15
3495
3496 u32 flags;
3497 #define MR_AN_ENABLE 0x00000001
3498 #define MR_RESTART_AN 0x00000002
3499 #define MR_AN_COMPLETE 0x00000004
3500 #define MR_PAGE_RX 0x00000008
3501 #define MR_NP_LOADED 0x00000010
3502 #define MR_TOGGLE_TX 0x00000020
3503 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
3504 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
3505 #define MR_LP_ADV_SYM_PAUSE 0x00000100
3506 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
3507 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3508 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3509 #define MR_LP_ADV_NEXT_PAGE 0x00001000
3510 #define MR_TOGGLE_RX 0x00002000
3511 #define MR_NP_RX 0x00004000
3512
3513 #define MR_LINK_OK 0x80000000
3514
3515 unsigned long link_time, cur_time;
3516
3517 u32 ability_match_cfg;
3518 int ability_match_count;
3519
3520 char ability_match, idle_match, ack_match;
3521
3522 u32 txconfig, rxconfig;
3523 #define ANEG_CFG_NP 0x00000080
3524 #define ANEG_CFG_ACK 0x00000040
3525 #define ANEG_CFG_RF2 0x00000020
3526 #define ANEG_CFG_RF1 0x00000010
3527 #define ANEG_CFG_PS2 0x00000001
3528 #define ANEG_CFG_PS1 0x00008000
3529 #define ANEG_CFG_HD 0x00004000
3530 #define ANEG_CFG_FD 0x00002000
3531 #define ANEG_CFG_INVAL 0x00001f06
3532
3533 };
3534 #define ANEG_OK 0
3535 #define ANEG_DONE 1
3536 #define ANEG_TIMER_ENAB 2
3537 #define ANEG_FAILED -1
3538
3539 #define ANEG_STATE_SETTLE_TIME 10000
3540
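/* Software implementation of an IEEE 802.3z clause 37 style
 * autonegotiation state machine for fiber links.  The caller ticks
 * it once per polling interval: ANEG_TIMER_ENAB asks for another
 * tick, while ANEG_DONE and ANEG_FAILED are terminal.
 * ANEG_STATE_SETTLE_TIME is measured in those ticks (one per
 * microsecond in fiber_autoneg() below).
 */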
3541 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3542 struct tg3_fiber_aneginfo *ap)
3543 {
3544 u16 flowctrl;
3545 unsigned long delta;
3546 u32 rx_cfg_reg;
3547 int ret;
3548
3549 if (ap->state == ANEG_STATE_UNKNOWN) {
3550 ap->rxconfig = 0;
3551 ap->link_time = 0;
3552 ap->cur_time = 0;
3553 ap->ability_match_cfg = 0;
3554 ap->ability_match_count = 0;
3555 ap->ability_match = 0;
3556 ap->idle_match = 0;
3557 ap->ack_match = 0;
3558 }
3559 ap->cur_time++;
3560
3561 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3562 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3563
3564 if (rx_cfg_reg != ap->ability_match_cfg) {
3565 ap->ability_match_cfg = rx_cfg_reg;
3566 ap->ability_match = 0;
3567 ap->ability_match_count = 0;
3568 } else {
3569 if (++ap->ability_match_count > 1) {
3570 ap->ability_match = 1;
3571 ap->ability_match_cfg = rx_cfg_reg;
3572 }
3573 }
3574 if (rx_cfg_reg & ANEG_CFG_ACK)
3575 ap->ack_match = 1;
3576 else
3577 ap->ack_match = 0;
3578
3579 ap->idle_match = 0;
3580 } else {
3581 ap->idle_match = 1;
3582 ap->ability_match_cfg = 0;
3583 ap->ability_match_count = 0;
3584 ap->ability_match = 0;
3585 ap->ack_match = 0;
3586
3587 rx_cfg_reg = 0;
3588 }
3589
3590 ap->rxconfig = rx_cfg_reg;
3591 ret = ANEG_OK;
3592
3593 switch (ap->state) {
3594 case ANEG_STATE_UNKNOWN:
3595 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3596 ap->state = ANEG_STATE_AN_ENABLE;
3597
3598 /* fallthru */
3599 case ANEG_STATE_AN_ENABLE:
3600 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3601 if (ap->flags & MR_AN_ENABLE) {
3602 ap->link_time = 0;
3603 ap->cur_time = 0;
3604 ap->ability_match_cfg = 0;
3605 ap->ability_match_count = 0;
3606 ap->ability_match = 0;
3607 ap->idle_match = 0;
3608 ap->ack_match = 0;
3609
3610 ap->state = ANEG_STATE_RESTART_INIT;
3611 } else {
3612 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3613 }
3614 break;
3615
3616 case ANEG_STATE_RESTART_INIT:
3617 ap->link_time = ap->cur_time;
3618 ap->flags &= ~(MR_NP_LOADED);
3619 ap->txconfig = 0;
3620 tw32(MAC_TX_AUTO_NEG, 0);
3621 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3622 tw32_f(MAC_MODE, tp->mac_mode);
3623 udelay(40);
3624
3625 ret = ANEG_TIMER_ENAB;
3626 ap->state = ANEG_STATE_RESTART;
3627
3628 /* fallthru */
3629 case ANEG_STATE_RESTART:
3630 delta = ap->cur_time - ap->link_time;
3631 if (delta > ANEG_STATE_SETTLE_TIME)
3632 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3633 else
3634 ret = ANEG_TIMER_ENAB;
3635 break;
3636
3637 case ANEG_STATE_DISABLE_LINK_OK:
3638 ret = ANEG_DONE;
3639 break;
3640
3641 case ANEG_STATE_ABILITY_DETECT_INIT:
3642 ap->flags &= ~(MR_TOGGLE_TX);
3643 ap->txconfig = ANEG_CFG_FD;
3644 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3645 if (flowctrl & ADVERTISE_1000XPAUSE)
3646 ap->txconfig |= ANEG_CFG_PS1;
3647 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3648 ap->txconfig |= ANEG_CFG_PS2;
3649 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3650 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3651 tw32_f(MAC_MODE, tp->mac_mode);
3652 udelay(40);
3653
3654 ap->state = ANEG_STATE_ABILITY_DETECT;
3655 break;
3656
3657 case ANEG_STATE_ABILITY_DETECT:
3658 if (ap->ability_match != 0 && ap->rxconfig != 0)
3659 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3660 break;
3661
3662 case ANEG_STATE_ACK_DETECT_INIT:
3663 ap->txconfig |= ANEG_CFG_ACK;
3664 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3665 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3666 tw32_f(MAC_MODE, tp->mac_mode);
3667 udelay(40);
3668
3669 ap->state = ANEG_STATE_ACK_DETECT;
3670
3671 /* fallthru */
3672 case ANEG_STATE_ACK_DETECT:
3673 if (ap->ack_match != 0) {
3674 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3675 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3676 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3677 } else {
3678 ap->state = ANEG_STATE_AN_ENABLE;
3679 }
3680 } else if (ap->ability_match != 0 &&
3681 ap->rxconfig == 0) {
3682 ap->state = ANEG_STATE_AN_ENABLE;
3683 }
3684 break;
3685
3686 case ANEG_STATE_COMPLETE_ACK_INIT:
3687 if (ap->rxconfig & ANEG_CFG_INVAL) {
3688 ret = ANEG_FAILED;
3689 break;
3690 }
3691 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3692 MR_LP_ADV_HALF_DUPLEX |
3693 MR_LP_ADV_SYM_PAUSE |
3694 MR_LP_ADV_ASYM_PAUSE |
3695 MR_LP_ADV_REMOTE_FAULT1 |
3696 MR_LP_ADV_REMOTE_FAULT2 |
3697 MR_LP_ADV_NEXT_PAGE |
3698 MR_TOGGLE_RX |
3699 MR_NP_RX);
3700 if (ap->rxconfig & ANEG_CFG_FD)
3701 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3702 if (ap->rxconfig & ANEG_CFG_HD)
3703 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3704 if (ap->rxconfig & ANEG_CFG_PS1)
3705 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3706 if (ap->rxconfig & ANEG_CFG_PS2)
3707 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3708 if (ap->rxconfig & ANEG_CFG_RF1)
3709 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3710 if (ap->rxconfig & ANEG_CFG_RF2)
3711 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3712 if (ap->rxconfig & ANEG_CFG_NP)
3713 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3714
3715 ap->link_time = ap->cur_time;
3716
3717 ap->flags ^= (MR_TOGGLE_TX);
3718 if (ap->rxconfig & 0x0008)
3719 ap->flags |= MR_TOGGLE_RX;
3720 if (ap->rxconfig & ANEG_CFG_NP)
3721 ap->flags |= MR_NP_RX;
3722 ap->flags |= MR_PAGE_RX;
3723
3724 ap->state = ANEG_STATE_COMPLETE_ACK;
3725 ret = ANEG_TIMER_ENAB;
3726 break;
3727
3728 case ANEG_STATE_COMPLETE_ACK:
3729 if (ap->ability_match != 0 &&
3730 ap->rxconfig == 0) {
3731 ap->state = ANEG_STATE_AN_ENABLE;
3732 break;
3733 }
3734 delta = ap->cur_time - ap->link_time;
3735 if (delta > ANEG_STATE_SETTLE_TIME) {
3736 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3737 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3738 } else {
3739 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3740 !(ap->flags & MR_NP_RX)) {
3741 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3742 } else {
3743 ret = ANEG_FAILED;
3744 }
3745 }
3746 }
3747 break;
3748
3749 case ANEG_STATE_IDLE_DETECT_INIT:
3750 ap->link_time = ap->cur_time;
3751 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3752 tw32_f(MAC_MODE, tp->mac_mode);
3753 udelay(40);
3754
3755 ap->state = ANEG_STATE_IDLE_DETECT;
3756 ret = ANEG_TIMER_ENAB;
3757 break;
3758
3759 case ANEG_STATE_IDLE_DETECT:
3760 if (ap->ability_match != 0 &&
3761 ap->rxconfig == 0) {
3762 ap->state = ANEG_STATE_AN_ENABLE;
3763 break;
3764 }
3765 delta = ap->cur_time - ap->link_time;
3766 if (delta > ANEG_STATE_SETTLE_TIME) {
3767 /* XXX another gem from the Broadcom driver :( */
3768 ap->state = ANEG_STATE_LINK_OK;
3769 }
3770 break;
3771
3772 case ANEG_STATE_LINK_OK:
3773 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3774 ret = ANEG_DONE;
3775 break;
3776
3777 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3778 /* ??? unimplemented */
3779 break;
3780
3781 case ANEG_STATE_NEXT_PAGE_WAIT:
3782 /* ??? unimplemented */
3783 break;
3784
3785 default:
3786 ret = ANEG_FAILED;
3787 break;
3788 }
3789
3790 return ret;
3791 }
3792
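/* Run the state machine above to completion: force the MAC into
 * GMII mode sending config words, tick tg3_fiber_aneg_smachine()
 * once per microsecond for up to roughly 195ms, and report success
 * only if autoneg completed against a full-duplex link partner.
 */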
3793 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3794 {
3795 int res = 0;
3796 struct tg3_fiber_aneginfo aninfo;
3797 int status = ANEG_FAILED;
3798 unsigned int tick;
3799 u32 tmp;
3800
3801 tw32_f(MAC_TX_AUTO_NEG, 0);
3802
3803 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3804 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3805 udelay(40);
3806
3807 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3808 udelay(40);
3809
3810 memset(&aninfo, 0, sizeof(aninfo));
3811 aninfo.flags |= MR_AN_ENABLE;
3812 aninfo.state = ANEG_STATE_UNKNOWN;
3813 aninfo.cur_time = 0;
3814 tick = 0;
3815 while (++tick < 195000) {
3816 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3817 if (status == ANEG_DONE || status == ANEG_FAILED)
3818 break;
3819
3820 udelay(1);
3821 }
3822
3823 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3824 tw32_f(MAC_MODE, tp->mac_mode);
3825 udelay(40);
3826
3827 *txflags = aninfo.txconfig;
3828 *rxflags = aninfo.flags;
3829
3830 if (status == ANEG_DONE &&
3831 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3832 MR_LP_ADV_FULL_DUPLEX)))
3833 res = 1;
3834
3835 return res;
3836 }
3837
3838 static void tg3_init_bcm8002(struct tg3 *tp)
3839 {
3840 u32 mac_status = tr32(MAC_STATUS);
3841 int i;
3842
3843 /* Reset when initializing the first time or when we have a link. */
3844 if (tg3_flag(tp, INIT_COMPLETE) &&
3845 !(mac_status & MAC_STATUS_PCS_SYNCED))
3846 return;
3847
3848 /* Set PLL lock range. */
3849 tg3_writephy(tp, 0x16, 0x8007);
3850
3851 /* SW reset */
3852 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3853
3854 /* Wait for reset to complete. */
3855 /* XXX schedule_timeout() ... */
3856 for (i = 0; i < 500; i++)
3857 udelay(10);
3858
3859 /* Config mode; select PMA/Ch 1 regs. */
3860 tg3_writephy(tp, 0x10, 0x8411);
3861
3862 /* Enable auto-lock and comdet, select txclk for tx. */
3863 tg3_writephy(tp, 0x11, 0x0a10);
3864
3865 tg3_writephy(tp, 0x18, 0x00a0);
3866 tg3_writephy(tp, 0x16, 0x41ff);
3867
3868 /* Assert and deassert POR. */
3869 tg3_writephy(tp, 0x13, 0x0400);
3870 udelay(40);
3871 tg3_writephy(tp, 0x13, 0x0000);
3872
3873 tg3_writephy(tp, 0x11, 0x0a50);
3874 udelay(40);
3875 tg3_writephy(tp, 0x11, 0x0a10);
3876
3877 /* Wait for signal to stabilize */
3878 /* XXX schedule_timeout() ... */
3879 for (i = 0; i < 15000; i++)
3880 udelay(10);
3881
3882 /* Deselect the channel register so we can read the PHYID
3883 * later.
3884 */
3885 tg3_writephy(tp, 0x10, 0x8011);
3886 }
3887
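/* Fiber autoneg using the hardware SG_DIG block.  On everything but
 * 5704 A0/A1 a serdes-config workaround is applied first; the
 * expected SG_DIG_CTRL value is built from the requested flow
 * control, and link and pause results are read back from
 * SG_DIG_STATUS once autoneg completes.
 */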
3888 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3889 {
3890 u16 flowctrl;
3891 u32 sg_dig_ctrl, sg_dig_status;
3892 u32 serdes_cfg, expected_sg_dig_ctrl;
3893 int workaround, port_a;
3894 int current_link_up;
3895
3896 serdes_cfg = 0;
3897 expected_sg_dig_ctrl = 0;
3898 workaround = 0;
3899 port_a = 1;
3900 current_link_up = 0;
3901
3902 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3903 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3904 workaround = 1;
3905 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3906 port_a = 0;
3907
3908 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3909 /* preserve bits 20-23 for voltage regulator */
3910 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3911 }
3912
3913 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3914
3915 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3916 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3917 if (workaround) {
3918 u32 val = serdes_cfg;
3919
3920 if (port_a)
3921 val |= 0xc010000;
3922 else
3923 val |= 0x4010000;
3924 tw32_f(MAC_SERDES_CFG, val);
3925 }
3926
3927 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3928 }
3929 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3930 tg3_setup_flow_control(tp, 0, 0);
3931 current_link_up = 1;
3932 }
3933 goto out;
3934 }
3935
3936 /* Want auto-negotiation. */
3937 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3938
3939 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3940 if (flowctrl & ADVERTISE_1000XPAUSE)
3941 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3942 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3943 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3944
3945 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3946 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3947 tp->serdes_counter &&
3948 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3949 MAC_STATUS_RCVD_CFG)) ==
3950 MAC_STATUS_PCS_SYNCED)) {
3951 tp->serdes_counter--;
3952 current_link_up = 1;
3953 goto out;
3954 }
3955 restart_autoneg:
3956 if (workaround)
3957 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3958 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3959 udelay(5);
3960 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3961
3962 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3963 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3964 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3965 MAC_STATUS_SIGNAL_DET)) {
3966 sg_dig_status = tr32(SG_DIG_STATUS);
3967 mac_status = tr32(MAC_STATUS);
3968
3969 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3970 (mac_status & MAC_STATUS_PCS_SYNCED)) {
3971 u32 local_adv = 0, remote_adv = 0;
3972
3973 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3974 local_adv |= ADVERTISE_1000XPAUSE;
3975 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3976 local_adv |= ADVERTISE_1000XPSE_ASYM;
3977
3978 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3979 remote_adv |= LPA_1000XPAUSE;
3980 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3981 remote_adv |= LPA_1000XPAUSE_ASYM;
3982
3983 tg3_setup_flow_control(tp, local_adv, remote_adv);
3984 current_link_up = 1;
3985 tp->serdes_counter = 0;
3986 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3987 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3988 if (tp->serdes_counter)
3989 tp->serdes_counter--;
3990 else {
3991 if (workaround) {
3992 u32 val = serdes_cfg;
3993
3994 if (port_a)
3995 val |= 0xc010000;
3996 else
3997 val |= 0x4010000;
3998
3999 tw32_f(MAC_SERDES_CFG, val);
4000 }
4001
4002 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4003 udelay(40);
4004
4005 /* Link parallel detection - link is up only
4006 * if we have PCS_SYNC and are not receiving
4007 * config code words. */
4008 mac_status = tr32(MAC_STATUS);
4009 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4010 !(mac_status & MAC_STATUS_RCVD_CFG)) {
4011 tg3_setup_flow_control(tp, 0, 0);
4012 current_link_up = 1;
4013 tp->phy_flags |=
4014 TG3_PHYFLG_PARALLEL_DETECT;
4015 tp->serdes_counter =
4016 SERDES_PARALLEL_DET_TIMEOUT;
4017 } else
4018 goto restart_autoneg;
4019 }
4020 }
4021 } else {
4022 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4023 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4024 }
4025
4026 out:
4027 return current_link_up;
4028 }
4029
4030 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4031 {
4032 int current_link_up = 0;
4033
4034 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4035 goto out;
4036
4037 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4038 u32 txflags, rxflags;
4039 int i;
4040
4041 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4042 u32 local_adv = 0, remote_adv = 0;
4043
4044 if (txflags & ANEG_CFG_PS1)
4045 local_adv |= ADVERTISE_1000XPAUSE;
4046 if (txflags & ANEG_CFG_PS2)
4047 local_adv |= ADVERTISE_1000XPSE_ASYM;
4048
4049 if (rxflags & MR_LP_ADV_SYM_PAUSE)
4050 remote_adv |= LPA_1000XPAUSE;
4051 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4052 remote_adv |= LPA_1000XPAUSE_ASYM;
4053
4054 tg3_setup_flow_control(tp, local_adv, remote_adv);
4055
4056 current_link_up = 1;
4057 }
4058 for (i = 0; i < 30; i++) {
4059 udelay(20);
4060 tw32_f(MAC_STATUS,
4061 (MAC_STATUS_SYNC_CHANGED |
4062 MAC_STATUS_CFG_CHANGED));
4063 udelay(40);
4064 if ((tr32(MAC_STATUS) &
4065 (MAC_STATUS_SYNC_CHANGED |
4066 MAC_STATUS_CFG_CHANGED)) == 0)
4067 break;
4068 }
4069
4070 mac_status = tr32(MAC_STATUS);
4071 if (current_link_up == 0 &&
4072 (mac_status & MAC_STATUS_PCS_SYNCED) &&
4073 !(mac_status & MAC_STATUS_RCVD_CFG))
4074 current_link_up = 1;
4075 } else {
4076 tg3_setup_flow_control(tp, 0, 0);
4077
4078 /* Forcing 1000FD link up. */
4079 current_link_up = 1;
4080
4081 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4082 udelay(40);
4083
4084 tw32_f(MAC_MODE, tp->mac_mode);
4085 udelay(40);
4086 }
4087
4088 out:
4089 return current_link_up;
4090 }
4091
4092 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4093 {
4094 u32 orig_pause_cfg;
4095 u16 orig_active_speed;
4096 u8 orig_active_duplex;
4097 u32 mac_status;
4098 int current_link_up;
4099 int i;
4100
4101 orig_pause_cfg = tp->link_config.active_flowctrl;
4102 orig_active_speed = tp->link_config.active_speed;
4103 orig_active_duplex = tp->link_config.active_duplex;
4104
4105 if (!tg3_flag(tp, HW_AUTONEG) &&
4106 netif_carrier_ok(tp->dev) &&
4107 tg3_flag(tp, INIT_COMPLETE)) {
4108 mac_status = tr32(MAC_STATUS);
4109 mac_status &= (MAC_STATUS_PCS_SYNCED |
4110 MAC_STATUS_SIGNAL_DET |
4111 MAC_STATUS_CFG_CHANGED |
4112 MAC_STATUS_RCVD_CFG);
4113 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4114 MAC_STATUS_SIGNAL_DET)) {
4115 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4116 MAC_STATUS_CFG_CHANGED));
4117 return 0;
4118 }
4119 }
4120
4121 tw32_f(MAC_TX_AUTO_NEG, 0);
4122
4123 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4124 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4125 tw32_f(MAC_MODE, tp->mac_mode);
4126 udelay(40);
4127
4128 if (tp->phy_id == TG3_PHY_ID_BCM8002)
4129 tg3_init_bcm8002(tp);
4130
4131 /* Enable link change event even when serdes polling. */
4132 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4133 udelay(40);
4134
4135 current_link_up = 0;
4136 mac_status = tr32(MAC_STATUS);
4137
4138 if (tg3_flag(tp, HW_AUTONEG))
4139 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4140 else
4141 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4142
4143 tp->napi[0].hw_status->status =
4144 (SD_STATUS_UPDATED |
4145 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4146
4147 for (i = 0; i < 100; i++) {
4148 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4149 MAC_STATUS_CFG_CHANGED));
4150 udelay(5);
4151 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4152 MAC_STATUS_CFG_CHANGED |
4153 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4154 break;
4155 }
4156
4157 mac_status = tr32(MAC_STATUS);
4158 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4159 current_link_up = 0;
4160 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4161 tp->serdes_counter == 0) {
4162 tw32_f(MAC_MODE, (tp->mac_mode |
4163 MAC_MODE_SEND_CONFIGS));
4164 udelay(1);
4165 tw32_f(MAC_MODE, tp->mac_mode);
4166 }
4167 }
4168
4169 if (current_link_up == 1) {
4170 tp->link_config.active_speed = SPEED_1000;
4171 tp->link_config.active_duplex = DUPLEX_FULL;
4172 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4173 LED_CTRL_LNKLED_OVERRIDE |
4174 LED_CTRL_1000MBPS_ON));
4175 } else {
4176 tp->link_config.active_speed = SPEED_INVALID;
4177 tp->link_config.active_duplex = DUPLEX_INVALID;
4178 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4179 LED_CTRL_LNKLED_OVERRIDE |
4180 LED_CTRL_TRAFFIC_OVERRIDE));
4181 }
4182
4183 if (current_link_up != netif_carrier_ok(tp->dev)) {
4184 if (current_link_up)
4185 netif_carrier_on(tp->dev);
4186 else
4187 netif_carrier_off(tp->dev);
4188 tg3_link_report(tp);
4189 } else {
4190 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4191 if (orig_pause_cfg != now_pause_cfg ||
4192 orig_active_speed != tp->link_config.active_speed ||
4193 orig_active_duplex != tp->link_config.active_duplex)
4194 tg3_link_report(tp);
4195 }
4196
4197 return 0;
4198 }
4199
4200 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4201 {
4202 int current_link_up, err = 0;
4203 u32 bmsr, bmcr;
4204 u16 current_speed;
4205 u8 current_duplex;
4206 u32 local_adv, remote_adv;
4207
4208 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4209 tw32_f(MAC_MODE, tp->mac_mode);
4210 udelay(40);
4211
4212 tw32(MAC_EVENT, 0);
4213
4214 tw32_f(MAC_STATUS,
4215 (MAC_STATUS_SYNC_CHANGED |
4216 MAC_STATUS_CFG_CHANGED |
4217 MAC_STATUS_MI_COMPLETION |
4218 MAC_STATUS_LNKSTATE_CHANGED));
4219 udelay(40);
4220
4221 if (force_reset)
4222 tg3_phy_reset(tp);
4223
4224 current_link_up = 0;
4225 current_speed = SPEED_INVALID;
4226 current_duplex = DUPLEX_INVALID;
4227
4228 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4229 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4230 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4231 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4232 bmsr |= BMSR_LSTATUS;
4233 else
4234 bmsr &= ~BMSR_LSTATUS;
4235 }
4236
4237 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4238
4239 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4240 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4241 /* do nothing, just check for link up at the end */
4242 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4243 u32 adv, new_adv;
4244
4245 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4246 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4247 ADVERTISE_1000XPAUSE |
4248 ADVERTISE_1000XPSE_ASYM |
4249 ADVERTISE_SLCT);
4250
4251 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4252
4253 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4254 new_adv |= ADVERTISE_1000XHALF;
4255 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4256 new_adv |= ADVERTISE_1000XFULL;
4257
4258 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4259 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4260 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4261 tg3_writephy(tp, MII_BMCR, bmcr);
4262
4263 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4264 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4265 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4266
4267 return err;
4268 }
4269 } else {
4270 u32 new_bmcr;
4271
4272 bmcr &= ~BMCR_SPEED1000;
4273 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4274
4275 if (tp->link_config.duplex == DUPLEX_FULL)
4276 new_bmcr |= BMCR_FULLDPLX;
4277
4278 if (new_bmcr != bmcr) {
4279 /* BMCR_SPEED1000 is a reserved bit that needs
4280 * to be set on write.
4281 */
4282 new_bmcr |= BMCR_SPEED1000;
4283
4284 /* Force a linkdown */
4285 if (netif_carrier_ok(tp->dev)) {
4286 u32 adv;
4287
4288 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4289 adv &= ~(ADVERTISE_1000XFULL |
4290 ADVERTISE_1000XHALF |
4291 ADVERTISE_SLCT);
4292 tg3_writephy(tp, MII_ADVERTISE, adv);
4293 tg3_writephy(tp, MII_BMCR, bmcr |
4294 BMCR_ANRESTART |
4295 BMCR_ANENABLE);
4296 udelay(10);
4297 netif_carrier_off(tp->dev);
4298 }
4299 tg3_writephy(tp, MII_BMCR, new_bmcr);
4300 bmcr = new_bmcr;
4301 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4302 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4303 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4304 ASIC_REV_5714) {
4305 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4306 bmsr |= BMSR_LSTATUS;
4307 else
4308 bmsr &= ~BMSR_LSTATUS;
4309 }
4310 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4311 }
4312 }
4313
4314 if (bmsr & BMSR_LSTATUS) {
4315 current_speed = SPEED_1000;
4316 current_link_up = 1;
4317 if (bmcr & BMCR_FULLDPLX)
4318 current_duplex = DUPLEX_FULL;
4319 else
4320 current_duplex = DUPLEX_HALF;
4321
4322 local_adv = 0;
4323 remote_adv = 0;
4324
4325 if (bmcr & BMCR_ANENABLE) {
4326 u32 common;
4327
4328 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4329 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4330 common = local_adv & remote_adv;
4331 if (common & (ADVERTISE_1000XHALF |
4332 ADVERTISE_1000XFULL)) {
4333 if (common & ADVERTISE_1000XFULL)
4334 current_duplex = DUPLEX_FULL;
4335 else
4336 current_duplex = DUPLEX_HALF;
4337 } else if (!tg3_flag(tp, 5780_CLASS)) {
4338 /* Link is up via parallel detect */
4339 } else {
4340 current_link_up = 0;
4341 }
4342 }
4343 }
4344
4345 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4346 tg3_setup_flow_control(tp, local_adv, remote_adv);
4347
4348 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4349 if (tp->link_config.active_duplex == DUPLEX_HALF)
4350 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4351
4352 tw32_f(MAC_MODE, tp->mac_mode);
4353 udelay(40);
4354
4355 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4356
4357 tp->link_config.active_speed = current_speed;
4358 tp->link_config.active_duplex = current_duplex;
4359
4360 if (current_link_up != netif_carrier_ok(tp->dev)) {
4361 if (current_link_up)
4362 netif_carrier_on(tp->dev);
4363 else {
4364 netif_carrier_off(tp->dev);
4365 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4366 }
4367 tg3_link_report(tp);
4368 }
4369 return err;
4370 }
4371
4372 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4373 {
4374 if (tp->serdes_counter) {
4375 /* Give autoneg time to complete. */
4376 tp->serdes_counter--;
4377 return;
4378 }
4379
4380 if (!netif_carrier_ok(tp->dev) &&
4381 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4382 u32 bmcr;
4383
4384 tg3_readphy(tp, MII_BMCR, &bmcr);
4385 if (bmcr & BMCR_ANENABLE) {
4386 u32 phy1, phy2;
4387
4388 /* Select shadow register 0x1f */
4389 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4390 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4391
4392 /* Select expansion interrupt status register */
4393 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4394 MII_TG3_DSP_EXP1_INT_STAT);
4395 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4396 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4397
4398 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4399 /* We have signal detect and not receiving
4400 * config code words, link is up by parallel
4401 * detection.
4402 */
4403
4404 bmcr &= ~BMCR_ANENABLE;
4405 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4406 tg3_writephy(tp, MII_BMCR, bmcr);
4407 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4408 }
4409 }
4410 } else if (netif_carrier_ok(tp->dev) &&
4411 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4412 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4413 u32 phy2;
4414
4415 /* Select expansion interrupt status register */
4416 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4417 MII_TG3_DSP_EXP1_INT_STAT);
4418 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4419 if (phy2 & 0x20) {
4420 u32 bmcr;
4421
4422 /* Config code words received, turn on autoneg. */
4423 tg3_readphy(tp, MII_BMCR, &bmcr);
4424 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4425
4426 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4427
4428 }
4429 }
4430 }
4431
4432 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4433 {
4434 u32 val;
4435 int err;
4436
4437 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4438 err = tg3_setup_fiber_phy(tp, force_reset);
4439 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4440 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4441 else
4442 err = tg3_setup_copper_phy(tp, force_reset);
4443
4444 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4445 u32 scale;
4446
4447 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4448 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4449 scale = 65;
4450 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4451 scale = 6;
4452 else
4453 scale = 12;
4454
4455 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4456 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4457 tw32(GRC_MISC_CFG, val);
4458 }
4459
4460 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4461 (6 << TX_LENGTHS_IPG_SHIFT);
4462 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
4463 val |= tr32(MAC_TX_LENGTHS) &
4464 (TX_LENGTHS_JMB_FRM_LEN_MSK |
4465 TX_LENGTHS_CNT_DWN_VAL_MSK);
4466
4467 if (tp->link_config.active_speed == SPEED_1000 &&
4468 tp->link_config.active_duplex == DUPLEX_HALF)
4469 tw32(MAC_TX_LENGTHS, val |
4470 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
4471 else
4472 tw32(MAC_TX_LENGTHS, val |
4473 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
4474
4475 if (!tg3_flag(tp, 5705_PLUS)) {
4476 if (netif_carrier_ok(tp->dev)) {
4477 tw32(HOSTCC_STAT_COAL_TICKS,
4478 tp->coal.stats_block_coalesce_usecs);
4479 } else {
4480 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4481 }
4482 }
4483
4484 if (tg3_flag(tp, ASPM_WORKAROUND)) {
4485 val = tr32(PCIE_PWR_MGMT_THRESH);
4486 if (!netif_carrier_ok(tp->dev))
4487 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4488 tp->pwrmgmt_thresh;
4489 else
4490 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4491 tw32(PCIE_PWR_MGMT_THRESH, val);
4492 }
4493
4494 return err;
4495 }
4496
4497 static inline int tg3_irq_sync(struct tg3 *tp)
4498 {
4499 return tp->irq_sync;
4500 }
4501
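/* Helper for the register dump below: the destination pointer is
 * advanced by 'off' bytes so that regs[off / 4] always holds the
 * register at offset 'off', i.e. the dump buffer layout mirrors the
 * hardware register offsets. 'len' is in bytes, read 32 bits at a
 * time.
 */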
4502 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4503 {
4504 int i;
4505
4506 dst = (u32 *)((u8 *)dst + off);
4507 for (i = 0; i < len; i += sizeof(u32))
4508 *dst++ = tr32(off + i);
4509 }
4510
4511 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
4512 {
4513 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
4514 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
4515 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
4516 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
4517 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
4518 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
4519 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
4520 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
4521 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
4522 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
4523 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
4524 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
4525 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
4526 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4527 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4528 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4529 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4530 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4531 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
4532
4533 if (tg3_flag(tp, SUPPORT_MSIX))
4534 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4535
4536 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4537 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4538 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4539 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4540 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4541 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4542 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4543 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
4544
4545 if (!tg3_flag(tp, 5705_PLUS)) {
4546 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4547 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4548 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4549 }
4550
4551 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4552 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4553 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4554 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4555 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4556
4557 if (tg3_flag(tp, NVRAM))
4558 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
4559 }
4560
4561 static void tg3_dump_state(struct tg3 *tp)
4562 {
4563 int i;
4564 u32 *regs;
4565
4566 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4567 if (!regs) {
4568 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4569 return;
4570 }
4571
4572 if (tg3_flag(tp, PCI_EXPRESS)) {
4573 /* Read up to but not including private PCI registers */
4574 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4575 regs[i / sizeof(u32)] = tr32(i);
4576 } else
4577 tg3_dump_legacy_regs(tp, regs);
4578
4579 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4580 if (!regs[i + 0] && !regs[i + 1] &&
4581 !regs[i + 2] && !regs[i + 3])
4582 continue;
4583
4584 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4585 i * 4,
4586 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4587 }
4588
4589 kfree(regs);
4590
4591 for (i = 0; i < tp->irq_cnt; i++) {
4592 struct tg3_napi *tnapi = &tp->napi[i];
4593
4594 /* SW status block */
4595 netdev_err(tp->dev,
4596 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4597 i,
4598 tnapi->hw_status->status,
4599 tnapi->hw_status->status_tag,
4600 tnapi->hw_status->rx_jumbo_consumer,
4601 tnapi->hw_status->rx_consumer,
4602 tnapi->hw_status->rx_mini_consumer,
4603 tnapi->hw_status->idx[0].rx_producer,
4604 tnapi->hw_status->idx[0].tx_consumer);
4605
4606 netdev_err(tp->dev,
4607 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4608 i,
4609 tnapi->last_tag, tnapi->last_irq_tag,
4610 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4611 tnapi->rx_rcb_ptr,
4612 tnapi->prodring.rx_std_prod_idx,
4613 tnapi->prodring.rx_std_cons_idx,
4614 tnapi->prodring.rx_jmb_prod_idx,
4615 tnapi->prodring.rx_jmb_cons_idx);
4616 }
4617 }
4618
4619 /* This is called whenever we suspect that the system chipset is re-
4620 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4621 * is bogus tx completions. We try to recover by setting the
4622 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4623 * in the workqueue.
4624 */
4625 static void tg3_tx_recover(struct tg3 *tp)
4626 {
4627 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
4628 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4629
4630 netdev_warn(tp->dev,
4631 "The system may be re-ordering memory-mapped I/O "
4632 "cycles to the network device, attempting to recover. "
4633 "Please report the problem to the driver maintainer "
4634 "and include system chipset information.\n");
4635
4636 spin_lock(&tp->lock);
4637 tg3_flag_set(tp, TX_RECOVERY_PENDING);
4638 spin_unlock(&tp->lock);
4639 }
4640
4641 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4642 {
4643 /* Tell compiler to fetch tx indices from memory. */
4644 barrier();
4645 return tnapi->tx_pending -
4646 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4647 }
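/* Worked example for the ring arithmetic above (illustrative,
 * assuming TG3_TX_RING_SIZE is 512 as defined in tg3.h): with
 * tx_prod = 5 and tx_cons = 510 the ring has wrapped, and
 * (5 - 510) & 511 = 7 descriptors are in flight, so
 * tx_pending - 7 slots remain available.
 */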
4648
4649 /* Tigon3 never reports partial packet sends. So we do not
4650 * need special logic to handle SKBs that have not had all
4651 * of their frags sent yet, like SunGEM does.
4652 */
4653 static void tg3_tx(struct tg3_napi *tnapi)
4654 {
4655 struct tg3 *tp = tnapi->tp;
4656 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4657 u32 sw_idx = tnapi->tx_cons;
4658 struct netdev_queue *txq;
4659 int index = tnapi - tp->napi;
4660
4661 if (tg3_flag(tp, ENABLE_TSS))
4662 index--;
4663
4664 txq = netdev_get_tx_queue(tp->dev, index);
4665
4666 while (sw_idx != hw_idx) {
4667 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4668 struct sk_buff *skb = ri->skb;
4669 int i, tx_bug = 0;
4670
4671 if (unlikely(skb == NULL)) {
4672 tg3_tx_recover(tp);
4673 return;
4674 }
4675
4676 pci_unmap_single(tp->pdev,
4677 dma_unmap_addr(ri, mapping),
4678 skb_headlen(skb),
4679 PCI_DMA_TODEVICE);
4680
4681 ri->skb = NULL;
4682
4683 sw_idx = NEXT_TX(sw_idx);
4684
4685 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4686 ri = &tnapi->tx_buffers[sw_idx];
4687 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4688 tx_bug = 1;
4689
4690 pci_unmap_page(tp->pdev,
4691 dma_unmap_addr(ri, mapping),
4692 skb_shinfo(skb)->frags[i].size,
4693 PCI_DMA_TODEVICE);
4694 sw_idx = NEXT_TX(sw_idx);
4695 }
4696
4697 dev_kfree_skb(skb);
4698
4699 if (unlikely(tx_bug)) {
4700 tg3_tx_recover(tp);
4701 return;
4702 }
4703 }
4704
4705 tnapi->tx_cons = sw_idx;
4706
4707 /* Need to make the tx_cons update visible to tg3_start_xmit()
4708 * before checking for netif_queue_stopped(). Without the
4709 * memory barrier, there is a small possibility that tg3_start_xmit()
4710 * will miss it and cause the queue to be stopped forever.
4711 */
4712 smp_mb();
4713
4714 if (unlikely(netif_tx_queue_stopped(txq) &&
4715 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4716 __netif_tx_lock(txq, smp_processor_id());
4717 if (netif_tx_queue_stopped(txq) &&
4718 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4719 netif_tx_wake_queue(txq);
4720 __netif_tx_unlock(txq);
4721 }
4722 }
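/* The smp_mb() above pairs with a corresponding barrier on the
 * transmit side after the queue is stopped (see e.g. the smp_mb()
 * in tg3_tso_bug()), so that either the transmit path observes the
 * updated tx_cons or tg3_tx() observes the stopped queue; without
 * the pairing a wakeup could be lost and the queue stall forever.
 */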
4723
4724 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4725 {
4726 if (!ri->skb)
4727 return;
4728
4729 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4730 map_sz, PCI_DMA_FROMDEVICE);
4731 dev_kfree_skb_any(ri->skb);
4732 ri->skb = NULL;
4733 }
4734
4735 /* Returns size of skb allocated or < 0 on error.
4736 *
4737 * We only need to fill in the address because the other members
4738 * of the RX descriptor are invariant; see tg3_init_rings.
4739 *
4740 * Note the purposeful asymmetry of cpu vs. chip accesses. For
4741 * posting buffers we only dirty the first cache line of the RX
4742 * descriptor (containing the address). Whereas for the RX status
4743 * buffers the cpu only reads the last cacheline of the RX descriptor
4744 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4745 */
4746 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4747 u32 opaque_key, u32 dest_idx_unmasked)
4748 {
4749 struct tg3_rx_buffer_desc *desc;
4750 struct ring_info *map;
4751 struct sk_buff *skb;
4752 dma_addr_t mapping;
4753 int skb_size, dest_idx;
4754
4755 switch (opaque_key) {
4756 case RXD_OPAQUE_RING_STD:
4757 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4758 desc = &tpr->rx_std[dest_idx];
4759 map = &tpr->rx_std_buffers[dest_idx];
4760 skb_size = tp->rx_pkt_map_sz;
4761 break;
4762
4763 case RXD_OPAQUE_RING_JUMBO:
4764 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4765 desc = &tpr->rx_jmb[dest_idx].std;
4766 map = &tpr->rx_jmb_buffers[dest_idx];
4767 skb_size = TG3_RX_JMB_MAP_SZ;
4768 break;
4769
4770 default:
4771 return -EINVAL;
4772 }
4773
4774 /* Do not overwrite any of the map or rp information
4775 * until we are sure we can commit to a new buffer.
4776 *
4777 * Callers depend upon this behavior and assume that
4778 * we leave everything unchanged if we fail.
4779 */
4780 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4781 if (skb == NULL)
4782 return -ENOMEM;
4783
4784 skb_reserve(skb, tp->rx_offset);
4785
4786 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4787 PCI_DMA_FROMDEVICE);
4788 if (pci_dma_mapping_error(tp->pdev, mapping)) {
4789 dev_kfree_skb(skb);
4790 return -EIO;
4791 }
4792
4793 map->skb = skb;
4794 dma_unmap_addr_set(map, mapping, mapping);
4795
4796 desc->addr_hi = ((u64)mapping >> 32);
4797 desc->addr_lo = ((u64)mapping & 0xffffffff);
4798
4799 return skb_size;
4800 }
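/* Illustration of the 64-bit DMA address split above: a mapping of
 * 0x123456780 is posted as addr_hi = 0x00000001 and
 * addr_lo = 0x23456780; for mappings below 4GB, addr_hi is simply 0.
 */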
4801
4802 /* We only need to copy the address over because the other
4803 * members of the RX descriptor are invariant. See notes above
4804 * tg3_alloc_rx_skb for full details.
4805 */
4806 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4807 struct tg3_rx_prodring_set *dpr,
4808 u32 opaque_key, int src_idx,
4809 u32 dest_idx_unmasked)
4810 {
4811 struct tg3 *tp = tnapi->tp;
4812 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4813 struct ring_info *src_map, *dest_map;
4814 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4815 int dest_idx;
4816
4817 switch (opaque_key) {
4818 case RXD_OPAQUE_RING_STD:
4819 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4820 dest_desc = &dpr->rx_std[dest_idx];
4821 dest_map = &dpr->rx_std_buffers[dest_idx];
4822 src_desc = &spr->rx_std[src_idx];
4823 src_map = &spr->rx_std_buffers[src_idx];
4824 break;
4825
4826 case RXD_OPAQUE_RING_JUMBO:
4827 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4828 dest_desc = &dpr->rx_jmb[dest_idx].std;
4829 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4830 src_desc = &spr->rx_jmb[src_idx].std;
4831 src_map = &spr->rx_jmb_buffers[src_idx];
4832 break;
4833
4834 default:
4835 return;
4836 }
4837
4838 dest_map->skb = src_map->skb;
4839 dma_unmap_addr_set(dest_map, mapping,
4840 dma_unmap_addr(src_map, mapping));
4841 dest_desc->addr_hi = src_desc->addr_hi;
4842 dest_desc->addr_lo = src_desc->addr_lo;
4843
4844 /* Ensure that the update to the skb happens after the physical
4845 * addresses have been transferred to the new BD location.
4846 */
4847 smp_wmb();
4848
4849 src_map->skb = NULL;
4850 }
4851
4852 /* The RX ring scheme is composed of multiple rings which post fresh
4853 * buffers to the chip, and one special ring the chip uses to report
4854 * status back to the host.
4855 *
4856 * The special ring reports the status of received packets to the
4857 * host. The chip does not write into the original descriptor the
4858 * RX buffer was obtained from. The chip simply takes the original
4859 * descriptor as provided by the host, updates the status and length
4860 * field, then writes this into the next status ring entry.
4861 *
4862 * Each ring the host uses to post buffers to the chip is described
4863 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
4864 * it is first placed into the on-chip ram. When the packet's length
4865 * is known, it walks down the TG3_BDINFO entries to select the ring.
4866 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4867 * which is within the range of the new packet's length is chosen.
4868 *
4869 * The "separate ring for rx status" scheme may sound odd, but it makes
4870 * sense from a cache coherency perspective. If only the host writes
4871 * to the buffer post rings, and only the chip writes to the rx status
4872 * rings, then cache lines never move beyond shared-modified state.
4873 * If both the host and chip were to write into the same ring, cache line
4874 * eviction could occur since both entities want it in an exclusive state.
4875 */
4876 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4877 {
4878 struct tg3 *tp = tnapi->tp;
4879 u32 work_mask, rx_std_posted = 0;
4880 u32 std_prod_idx, jmb_prod_idx;
4881 u32 sw_idx = tnapi->rx_rcb_ptr;
4882 u16 hw_idx;
4883 int received;
4884 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
4885
4886 hw_idx = *(tnapi->rx_rcb_prod_idx);
4887 /*
4888 * We need to order the read of hw_idx and the read of
4889 * the opaque cookie.
4890 */
4891 rmb();
4892 work_mask = 0;
4893 received = 0;
4894 std_prod_idx = tpr->rx_std_prod_idx;
4895 jmb_prod_idx = tpr->rx_jmb_prod_idx;
4896 while (sw_idx != hw_idx && budget > 0) {
4897 struct ring_info *ri;
4898 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4899 unsigned int len;
4900 struct sk_buff *skb;
4901 dma_addr_t dma_addr;
4902 u32 opaque_key, desc_idx, *post_ptr;
4903
4904 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4905 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4906 if (opaque_key == RXD_OPAQUE_RING_STD) {
4907 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
4908 dma_addr = dma_unmap_addr(ri, mapping);
4909 skb = ri->skb;
4910 post_ptr = &std_prod_idx;
4911 rx_std_posted++;
4912 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4913 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
4914 dma_addr = dma_unmap_addr(ri, mapping);
4915 skb = ri->skb;
4916 post_ptr = &jmb_prod_idx;
4917 } else
4918 goto next_pkt_nopost;
4919
4920 work_mask |= opaque_key;
4921
4922 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4923 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4924 drop_it:
4925 tg3_recycle_rx(tnapi, tpr, opaque_key,
4926 desc_idx, *post_ptr);
4927 drop_it_no_recycle:
4928 /* Other statistics are tracked by the card. */
4929 tp->rx_dropped++;
4930 goto next_pkt;
4931 }
4932
4933 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4934 ETH_FCS_LEN;
4935
4936 if (len > TG3_RX_COPY_THRESH(tp)) {
4937 int skb_size;
4938
4939 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4940 *post_ptr);
4941 if (skb_size < 0)
4942 goto drop_it;
4943
4944 pci_unmap_single(tp->pdev, dma_addr, skb_size,
4945 PCI_DMA_FROMDEVICE);
4946
4947 /* Ensure that the update to the skb happens
4948 * after the usage of the old DMA mapping.
4949 */
4950 smp_wmb();
4951
4952 ri->skb = NULL;
4953
4954 skb_put(skb, len);
4955 } else {
4956 struct sk_buff *copy_skb;
4957
4958 tg3_recycle_rx(tnapi, tpr, opaque_key,
4959 desc_idx, *post_ptr);
4960
4961 copy_skb = netdev_alloc_skb(tp->dev, len +
4962 TG3_RAW_IP_ALIGN);
4963 if (copy_skb == NULL)
4964 goto drop_it_no_recycle;
4965
4966 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4967 skb_put(copy_skb, len);
4968 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4969 skb_copy_from_linear_data(skb, copy_skb->data, len);
4970 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4971
4972 /* We'll reuse the original ring buffer. */
4973 skb = copy_skb;
4974 }
4975
4976 if ((tp->dev->features & NETIF_F_RXCSUM) &&
4977 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4978 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4979 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4980 skb->ip_summed = CHECKSUM_UNNECESSARY;
4981 else
4982 skb_checksum_none_assert(skb);
4983
4984 skb->protocol = eth_type_trans(skb, tp->dev);
4985
4986 if (len > (tp->dev->mtu + ETH_HLEN) &&
4987 skb->protocol != htons(ETH_P_8021Q)) {
4988 dev_kfree_skb(skb);
4989 goto drop_it_no_recycle;
4990 }
4991
4992 if (desc->type_flags & RXD_FLAG_VLAN &&
4993 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
4994 __vlan_hwaccel_put_tag(skb,
4995 desc->err_vlan & RXD_VLAN_MASK);
4996
4997 napi_gro_receive(&tnapi->napi, skb);
4998
4999 received++;
5000 budget--;
5001
5002 next_pkt:
5003 (*post_ptr)++;
5004
5005 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5006 tpr->rx_std_prod_idx = std_prod_idx &
5007 tp->rx_std_ring_mask;
5008 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5009 tpr->rx_std_prod_idx);
5010 work_mask &= ~RXD_OPAQUE_RING_STD;
5011 rx_std_posted = 0;
5012 }
5013 next_pkt_nopost:
5014 sw_idx++;
5015 sw_idx &= tp->rx_ret_ring_mask;
5016
5017 /* Refresh hw_idx to see if there is new work */
5018 if (sw_idx == hw_idx) {
5019 hw_idx = *(tnapi->rx_rcb_prod_idx);
5020 rmb();
5021 }
5022 }
5023
5024 /* ACK the status ring. */
5025 tnapi->rx_rcb_ptr = sw_idx;
5026 tw32_rx_mbox(tnapi->consmbox, sw_idx);
5027
5028 /* Refill RX ring(s). */
5029 if (!tg3_flag(tp, ENABLE_RSS)) {
5030 if (work_mask & RXD_OPAQUE_RING_STD) {
5031 tpr->rx_std_prod_idx = std_prod_idx &
5032 tp->rx_std_ring_mask;
5033 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5034 tpr->rx_std_prod_idx);
5035 }
5036 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5037 tpr->rx_jmb_prod_idx = jmb_prod_idx &
5038 tp->rx_jmb_ring_mask;
5039 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5040 tpr->rx_jmb_prod_idx);
5041 }
5042 mmiowb();
5043 } else if (work_mask) {
5044 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5045 * updated before the producer indices can be updated.
5046 */
5047 smp_wmb();
5048
5049 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5050 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5051
5052 if (tnapi != &tp->napi[1])
5053 napi_schedule(&tp->napi[1].napi);
5054 }
5055
5056 return received;
5057 }
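/* The opaque cookie round trip in tg3_rx() above, by example: the
 * driver posts a standard-ring buffer whose descriptor carries
 * RXD_OPAQUE_RING_STD plus the buffer's ring index; the chip copies
 * that opaque field unchanged into the return-ring entry, which is
 * how the loop recovers the original ring_info (and hence the skb
 * and DMA mapping) for each completion.
 */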
5058
5059 static void tg3_poll_link(struct tg3 *tp)
5060 {
5061 /* handle link change and other phy events */
5062 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5063 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5064
5065 if (sblk->status & SD_STATUS_LINK_CHG) {
5066 sblk->status = SD_STATUS_UPDATED |
5067 (sblk->status & ~SD_STATUS_LINK_CHG);
5068 spin_lock(&tp->lock);
5069 if (tg3_flag(tp, USE_PHYLIB)) {
5070 tw32_f(MAC_STATUS,
5071 (MAC_STATUS_SYNC_CHANGED |
5072 MAC_STATUS_CFG_CHANGED |
5073 MAC_STATUS_MI_COMPLETION |
5074 MAC_STATUS_LNKSTATE_CHANGED));
5075 udelay(40);
5076 } else
5077 tg3_setup_phy(tp, 0);
5078 spin_unlock(&tp->lock);
5079 }
5080 }
5081 }
5082
5083 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5084 struct tg3_rx_prodring_set *dpr,
5085 struct tg3_rx_prodring_set *spr)
5086 {
5087 u32 si, di, cpycnt, src_prod_idx;
5088 int i, err = 0;
5089
5090 while (1) {
5091 src_prod_idx = spr->rx_std_prod_idx;
5092
5093 /* Make sure updates to the rx_std_buffers[] entries and the
5094 * standard producer index are seen in the correct order.
5095 */
5096 smp_rmb();
5097
5098 if (spr->rx_std_cons_idx == src_prod_idx)
5099 break;
5100
5101 if (spr->rx_std_cons_idx < src_prod_idx)
5102 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5103 else
5104 cpycnt = tp->rx_std_ring_mask + 1 -
5105 spr->rx_std_cons_idx;
5106
5107 cpycnt = min(cpycnt,
5108 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5109
5110 si = spr->rx_std_cons_idx;
5111 di = dpr->rx_std_prod_idx;
5112
5113 for (i = di; i < di + cpycnt; i++) {
5114 if (dpr->rx_std_buffers[i].skb) {
5115 cpycnt = i - di;
5116 err = -ENOSPC;
5117 break;
5118 }
5119 }
5120
5121 if (!cpycnt)
5122 break;
5123
5124 /* Ensure that updates to the rx_std_buffers ring and the
5125 * shadowed hardware producer ring from tg3_recycle_skb() are
5126 * ordered correctly WRT the skb check above.
5127 */
5128 smp_rmb();
5129
5130 memcpy(&dpr->rx_std_buffers[di],
5131 &spr->rx_std_buffers[si],
5132 cpycnt * sizeof(struct ring_info));
5133
5134 for (i = 0; i < cpycnt; i++, di++, si++) {
5135 struct tg3_rx_buffer_desc *sbd, *dbd;
5136 sbd = &spr->rx_std[si];
5137 dbd = &dpr->rx_std[di];
5138 dbd->addr_hi = sbd->addr_hi;
5139 dbd->addr_lo = sbd->addr_lo;
5140 }
5141
5142 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5143 tp->rx_std_ring_mask;
5144 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5145 tp->rx_std_ring_mask;
5146 }
5147
5148 while (1) {
5149 src_prod_idx = spr->rx_jmb_prod_idx;
5150
5151 /* Make sure updates to the rx_jmb_buffers[] entries and
5152 * the jumbo producer index are seen in the correct order.
5153 */
5154 smp_rmb();
5155
5156 if (spr->rx_jmb_cons_idx == src_prod_idx)
5157 break;
5158
5159 if (spr->rx_jmb_cons_idx < src_prod_idx)
5160 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5161 else
5162 cpycnt = tp->rx_jmb_ring_mask + 1 -
5163 spr->rx_jmb_cons_idx;
5164
5165 cpycnt = min(cpycnt,
5166 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5167
5168 si = spr->rx_jmb_cons_idx;
5169 di = dpr->rx_jmb_prod_idx;
5170
5171 for (i = di; i < di + cpycnt; i++) {
5172 if (dpr->rx_jmb_buffers[i].skb) {
5173 cpycnt = i - di;
5174 err = -ENOSPC;
5175 break;
5176 }
5177 }
5178
5179 if (!cpycnt)
5180 break;
5181
5182 /* Ensure that updates to the rx_jmb_buffers ring and the
5183 * shadowed hardware producer ring from tg3_recycle_skb() are
5184 * ordered correctly WRT the skb check above.
5185 */
5186 smp_rmb();
5187
5188 memcpy(&dpr->rx_jmb_buffers[di],
5189 &spr->rx_jmb_buffers[si],
5190 cpycnt * sizeof(struct ring_info));
5191
5192 for (i = 0; i < cpycnt; i++, di++, si++) {
5193 struct tg3_rx_buffer_desc *sbd, *dbd;
5194 sbd = &spr->rx_jmb[si].std;
5195 dbd = &dpr->rx_jmb[di].std;
5196 dbd->addr_hi = sbd->addr_hi;
5197 dbd->addr_lo = sbd->addr_lo;
5198 }
5199
5200 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5201 tp->rx_jmb_ring_mask;
5202 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5203 tp->rx_jmb_ring_mask;
5204 }
5205
5206 return err;
5207 }
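/* Wraparound example for the copy loops above (illustrative,
 * assuming a 512-entry standard ring, mask 511): with
 * rx_std_cons_idx = 500 and rx_std_prod_idx = 10, the first pass
 * copies cpycnt = 512 - 500 = 12 entries up to the end of the ring,
 * the consumer index wraps to 0, and a second pass copies the
 * remaining 10 entries (each pass also capped by free space in the
 * destination ring).
 */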
5208
5209 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5210 {
5211 struct tg3 *tp = tnapi->tp;
5212
5213 /* run TX completion thread */
5214 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5215 tg3_tx(tnapi);
5216 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5217 return work_done;
5218 }
5219
5220 /* run RX thread, within the bounds set by NAPI.
5221 * All RX "locking" is done by ensuring outside
5222 * code synchronizes with tg3->napi.poll()
5223 */
5224 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5225 work_done += tg3_rx(tnapi, budget - work_done);
5226
5227 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5228 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5229 int i, err = 0;
5230 u32 std_prod_idx = dpr->rx_std_prod_idx;
5231 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5232
5233 for (i = 1; i < tp->irq_cnt; i++)
5234 err |= tg3_rx_prodring_xfer(tp, dpr,
5235 &tp->napi[i].prodring);
5236
5237 wmb();
5238
5239 if (std_prod_idx != dpr->rx_std_prod_idx)
5240 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5241 dpr->rx_std_prod_idx);
5242
5243 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5244 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5245 dpr->rx_jmb_prod_idx);
5246
5247 mmiowb();
5248
5249 if (err)
5250 tw32_f(HOSTCC_MODE, tp->coal_now);
5251 }
5252
5253 return work_done;
5254 }
5255
5256 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5257 {
5258 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5259 struct tg3 *tp = tnapi->tp;
5260 int work_done = 0;
5261 struct tg3_hw_status *sblk = tnapi->hw_status;
5262
5263 while (1) {
5264 work_done = tg3_poll_work(tnapi, work_done, budget);
5265
5266 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5267 goto tx_recovery;
5268
5269 if (unlikely(work_done >= budget))
5270 break;
5271
5272 /* tnapi->last_tag is used when re-enabling interrupts below
5273 * to tell the hw how much work has been processed,
5274 * so we must read it before checking for more work.
5275 */
5276 tnapi->last_tag = sblk->status_tag;
5277 tnapi->last_irq_tag = tnapi->last_tag;
5278 rmb();
5279
5280 /* check for RX/TX work to do */
5281 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5282 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5283 napi_complete(napi);
5284 /* Reenable interrupts. */
5285 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5286 mmiowb();
5287 break;
5288 }
5289 }
5290
5291 return work_done;
5292
5293 tx_recovery:
5294 /* work_done is guaranteed to be less than budget. */
5295 napi_complete(napi);
5296 schedule_work(&tp->reset_task);
5297 return work_done;
5298 }
5299
5300 static void tg3_process_error(struct tg3 *tp)
5301 {
5302 u32 val;
5303 bool real_error = false;
5304
5305 if (tg3_flag(tp, ERROR_PROCESSED))
5306 return;
5307
5308 /* Check Flow Attention register */
5309 val = tr32(HOSTCC_FLOW_ATTN);
5310 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5311 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
5312 real_error = true;
5313 }
5314
5315 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5316 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
5317 real_error = true;
5318 }
5319
5320 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5321 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
5322 real_error = true;
5323 }
5324
5325 if (!real_error)
5326 return;
5327
5328 tg3_dump_state(tp);
5329
5330 tg3_flag_set(tp, ERROR_PROCESSED);
5331 schedule_work(&tp->reset_task);
5332 }
5333
5334 static int tg3_poll(struct napi_struct *napi, int budget)
5335 {
5336 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5337 struct tg3 *tp = tnapi->tp;
5338 int work_done = 0;
5339 struct tg3_hw_status *sblk = tnapi->hw_status;
5340
5341 while (1) {
5342 if (sblk->status & SD_STATUS_ERROR)
5343 tg3_process_error(tp);
5344
5345 tg3_poll_link(tp);
5346
5347 work_done = tg3_poll_work(tnapi, work_done, budget);
5348
5349 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5350 goto tx_recovery;
5351
5352 if (unlikely(work_done >= budget))
5353 break;
5354
5355 if (tg3_flag(tp, TAGGED_STATUS)) {
5356 /* tnapi->last_tag is used in tg3_int_reenable() below
5357 * to tell the hw how much work has been processed,
5358 * so we must read it before checking for more work.
5359 */
5360 tnapi->last_tag = sblk->status_tag;
5361 tnapi->last_irq_tag = tnapi->last_tag;
5362 rmb();
5363 } else
5364 sblk->status &= ~SD_STATUS_UPDATED;
5365
5366 if (likely(!tg3_has_work(tnapi))) {
5367 napi_complete(napi);
5368 tg3_int_reenable(tnapi);
5369 break;
5370 }
5371 }
5372
5373 return work_done;
5374
5375 tx_recovery:
5376 /* work_done is guaranteed to be less than budget. */
5377 napi_complete(napi);
5378 schedule_work(&tp->reset_task);
5379 return work_done;
5380 }
5381
5382 static void tg3_napi_disable(struct tg3 *tp)
5383 {
5384 int i;
5385
5386 for (i = tp->irq_cnt - 1; i >= 0; i--)
5387 napi_disable(&tp->napi[i].napi);
5388 }
5389
5390 static void tg3_napi_enable(struct tg3 *tp)
5391 {
5392 int i;
5393
5394 for (i = 0; i < tp->irq_cnt; i++)
5395 napi_enable(&tp->napi[i].napi);
5396 }
5397
5398 static void tg3_napi_init(struct tg3 *tp)
5399 {
5400 int i;
5401
5402 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5403 for (i = 1; i < tp->irq_cnt; i++)
5404 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5405 }
5406
5407 static void tg3_napi_fini(struct tg3 *tp)
5408 {
5409 int i;
5410
5411 for (i = 0; i < tp->irq_cnt; i++)
5412 netif_napi_del(&tp->napi[i].napi);
5413 }
5414
5415 static inline void tg3_netif_stop(struct tg3 *tp)
5416 {
5417 tp->dev->trans_start = jiffies; /* prevent tx timeout */
5418 tg3_napi_disable(tp);
5419 netif_tx_disable(tp->dev);
5420 }
5421
5422 static inline void tg3_netif_start(struct tg3 *tp)
5423 {
5424 /* NOTE: unconditional netif_tx_wake_all_queues is only
5425 * appropriate so long as all callers are assured to
5426 * have free tx slots (such as after tg3_init_hw)
5427 */
5428 netif_tx_wake_all_queues(tp->dev);
5429
5430 tg3_napi_enable(tp);
5431 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5432 tg3_enable_ints(tp);
5433 }
5434
5435 static void tg3_irq_quiesce(struct tg3 *tp)
5436 {
5437 int i;
5438
5439 BUG_ON(tp->irq_sync);
5440
5441 tp->irq_sync = 1;
5442 smp_mb();
5443
5444 for (i = 0; i < tp->irq_cnt; i++)
5445 synchronize_irq(tp->napi[i].irq_vec);
5446 }
5447
5448 /* Fully shut down all tg3 driver activity elsewhere in the system.
5449 * If irq_sync is non-zero, we also wait for any in-flight IRQ
5450 * handlers to finish. This is usually only needed when
5451 * shutting down the device.
5452 */
5453 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5454 {
5455 spin_lock_bh(&tp->lock);
5456 if (irq_sync)
5457 tg3_irq_quiesce(tp);
5458 }
5459
5460 static inline void tg3_full_unlock(struct tg3 *tp)
5461 {
5462 spin_unlock_bh(&tp->lock);
5463 }
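/* Typical usage of the locking helpers above (sketch drawn from the
 * reset path further below): callers that only touch driver state
 * take tg3_full_lock(tp, 0); callers about to reconfigure or halt
 * the hardware pass irq_sync = 1 so in-flight interrupt handlers
 * have finished before the critical section proceeds, e.g.:
 *
 *   tg3_full_lock(tp, 1);
 *   tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
 *   err = tg3_init_hw(tp, 1);
 *   tg3_full_unlock(tp);
 */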
5464
5465 /* One-shot MSI handler - Chip automatically disables interrupt
5466 * after sending MSI so driver doesn't have to do it.
5467 */
5468 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5469 {
5470 struct tg3_napi *tnapi = dev_id;
5471 struct tg3 *tp = tnapi->tp;
5472
5473 prefetch(tnapi->hw_status);
5474 if (tnapi->rx_rcb)
5475 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5476
5477 if (likely(!tg3_irq_sync(tp)))
5478 napi_schedule(&tnapi->napi);
5479
5480 return IRQ_HANDLED;
5481 }
5482
5483 /* MSI ISR - No need to check for interrupt sharing and no need to
5484 * flush status block and interrupt mailbox. PCI ordering rules
5485 * guarantee that MSI will arrive after the status block.
5486 */
5487 static irqreturn_t tg3_msi(int irq, void *dev_id)
5488 {
5489 struct tg3_napi *tnapi = dev_id;
5490 struct tg3 *tp = tnapi->tp;
5491
5492 prefetch(tnapi->hw_status);
5493 if (tnapi->rx_rcb)
5494 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5495 /*
5496 * Writing any value to intr-mbox-0 clears PCI INTA# and
5497 * chip-internal interrupt pending events.
5498 * Writing non-zero to intr-mbox-0 additionally tells the
5499 * NIC to stop sending us irqs, engaging "in-intr-handler"
5500 * event coalescing.
5501 */
5502 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5503 if (likely(!tg3_irq_sync(tp)))
5504 napi_schedule(&tnapi->napi);
5505
5506 return IRQ_RETVAL(1);
5507 }
5508
5509 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5510 {
5511 struct tg3_napi *tnapi = dev_id;
5512 struct tg3 *tp = tnapi->tp;
5513 struct tg3_hw_status *sblk = tnapi->hw_status;
5514 unsigned int handled = 1;
5515
5516 /* In INTx mode, it is possible for the interrupt to arrive at
5517 * the CPU before the status block that was posted just before it.
5518 * Reading the PCI State register will confirm whether the
5519 * interrupt is ours and will flush the status block.
5520 */
5521 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5522 if (tg3_flag(tp, CHIP_RESETTING) ||
5523 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5524 handled = 0;
5525 goto out;
5526 }
5527 }
5528
5529 /*
5530 * Writing any value to intr-mbox-0 clears PCI INTA# and
5531 * chip-internal interrupt pending events.
5532 * Writing non-zero to intr-mbox-0 additionally tells the
5533 * NIC to stop sending us irqs, engaging "in-intr-handler"
5534 * event coalescing.
5535 *
5536 * Flush the mailbox to de-assert the IRQ immediately to prevent
5537 * spurious interrupts. The flush impacts performance but
5538 * excessive spurious interrupts can be worse in some cases.
5539 */
5540 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5541 if (tg3_irq_sync(tp))
5542 goto out;
5543 sblk->status &= ~SD_STATUS_UPDATED;
5544 if (likely(tg3_has_work(tnapi))) {
5545 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5546 napi_schedule(&tnapi->napi);
5547 } else {
5548 /* No work, shared interrupt perhaps? Re-enable
5549 * interrupts, and flush that PCI write
5550 */
5551 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5552 0x00000000);
5553 }
5554 out:
5555 return IRQ_RETVAL(handled);
5556 }
5557
5558 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5559 {
5560 struct tg3_napi *tnapi = dev_id;
5561 struct tg3 *tp = tnapi->tp;
5562 struct tg3_hw_status *sblk = tnapi->hw_status;
5563 unsigned int handled = 1;
5564
5565 /* In INTx mode, it is possible for the interrupt to arrive at
5566 * the CPU before the status block that was posted just before it.
5567 * Reading the PCI State register will confirm whether the
5568 * interrupt is ours and will flush the status block.
5569 */
5570 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5571 if (tg3_flag(tp, CHIP_RESETTING) ||
5572 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5573 handled = 0;
5574 goto out;
5575 }
5576 }
5577
5578 /*
5579 * Writing any value to intr-mbox-0 clears PCI INTA# and
5580 * chip-internal interrupt pending events.
5581 * Writing non-zero to intr-mbox-0 additionally tells the
5582 * NIC to stop sending us irqs, engaging "in-intr-handler"
5583 * event coalescing.
5584 *
5585 * Flush the mailbox to de-assert the IRQ immediately to prevent
5586 * spurious interrupts. The flush impacts performance but
5587 * excessive spurious interrupts can be worse in some cases.
5588 */
5589 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5590
5591 /*
5592 * In a shared interrupt configuration, sometimes other devices'
5593 * interrupts will scream. We record the current status tag here
5594 * so that the above check can report that the screaming interrupts
5595 * are unhandled. Eventually they will be silenced.
5596 */
5597 tnapi->last_irq_tag = sblk->status_tag;
5598
5599 if (tg3_irq_sync(tp))
5600 goto out;
5601
5602 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5603
5604 napi_schedule(&tnapi->napi);
5605
5606 out:
5607 return IRQ_RETVAL(handled);
5608 }
5609
5610 /* ISR for interrupt test */
5611 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5612 {
5613 struct tg3_napi *tnapi = dev_id;
5614 struct tg3 *tp = tnapi->tp;
5615 struct tg3_hw_status *sblk = tnapi->hw_status;
5616
5617 if ((sblk->status & SD_STATUS_UPDATED) ||
5618 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5619 tg3_disable_ints(tp);
5620 return IRQ_RETVAL(1);
5621 }
5622 return IRQ_RETVAL(0);
5623 }
5624
5625 static int tg3_init_hw(struct tg3 *, int);
5626 static int tg3_halt(struct tg3 *, int, int);
5627
5628 /* Restart hardware after configuration changes, self-test, etc.
5629 * Invoked with tp->lock held.
5630 */
5631 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5632 __releases(tp->lock)
5633 __acquires(tp->lock)
5634 {
5635 int err;
5636
5637 err = tg3_init_hw(tp, reset_phy);
5638 if (err) {
5639 netdev_err(tp->dev,
5640 "Failed to re-initialize device, aborting\n");
5641 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5642 tg3_full_unlock(tp);
5643 del_timer_sync(&tp->timer);
5644 tp->irq_sync = 0;
5645 tg3_napi_enable(tp);
5646 dev_close(tp->dev);
5647 tg3_full_lock(tp, 0);
5648 }
5649 return err;
5650 }
5651
5652 #ifdef CONFIG_NET_POLL_CONTROLLER
5653 static void tg3_poll_controller(struct net_device *dev)
5654 {
5655 int i;
5656 struct tg3 *tp = netdev_priv(dev);
5657
5658 for (i = 0; i < tp->irq_cnt; i++)
5659 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5660 }
5661 #endif
5662
5663 static void tg3_reset_task(struct work_struct *work)
5664 {
5665 struct tg3 *tp = container_of(work, struct tg3, reset_task);
5666 int err;
5667 unsigned int restart_timer;
5668
5669 tg3_full_lock(tp, 0);
5670
5671 if (!netif_running(tp->dev)) {
5672 tg3_full_unlock(tp);
5673 return;
5674 }
5675
5676 tg3_full_unlock(tp);
5677
5678 tg3_phy_stop(tp);
5679
5680 tg3_netif_stop(tp);
5681
5682 tg3_full_lock(tp, 1);
5683
5684 restart_timer = tg3_flag(tp, RESTART_TIMER);
5685 tg3_flag_clear(tp, RESTART_TIMER);
5686
5687 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
5688 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5689 tp->write32_rx_mbox = tg3_write_flush_reg32;
5690 tg3_flag_set(tp, MBOX_WRITE_REORDER);
5691 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
5692 }
5693
5694 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5695 err = tg3_init_hw(tp, 1);
5696 if (err)
5697 goto out;
5698
5699 tg3_netif_start(tp);
5700
5701 if (restart_timer)
5702 mod_timer(&tp->timer, jiffies + 1);
5703
5704 out:
5705 tg3_full_unlock(tp);
5706
5707 if (!err)
5708 tg3_phy_start(tp);
5709 }
5710
5711 static void tg3_tx_timeout(struct net_device *dev)
5712 {
5713 struct tg3 *tp = netdev_priv(dev);
5714
5715 if (netif_msg_tx_err(tp)) {
5716 netdev_err(dev, "transmit timed out, resetting\n");
5717 tg3_dump_state(tp);
5718 }
5719
5720 schedule_work(&tp->reset_task);
5721 }
5722
5723 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
5724 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5725 {
5726 u32 base = (u32) mapping & 0xffffffff;
5727
5728 return (base > 0xffffdcc0) && (base + len + 8 < base);
5729 }
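/* Worked example for the test above: mapping = 0xfffffff0 with
 * len = 100 gives base + len + 8 = 0x10000005c, which truncates to
 * 0x5c as a u32. That is less than base, so the buffer is flagged
 * as crossing a 4GB boundary. The base > 0xffffdcc0 pre-check
 * cheaply skips buffers whose base sits more than a maximum-sized
 * frame short of the boundary.
 */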
5730
5731 /* Test for DMA addresses > 40-bit */
5732 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5733 int len)
5734 {
5735 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5736 if (tg3_flag(tp, 40BIT_DMA_BUG))
5737 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5738 return 0;
5739 #else
5740 return 0;
5741 #endif
5742 }
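/* Similarly for the 40-bit test above: DMA_BIT_MASK(40) is
 * 0xffffffffff (2^40 - 1), so on affected parts a mapping of
 * 0xfffffff000 with len = 8192 sums to 0x10000001000 and is
 * rejected, while any mapping + len at or below the 40-bit limit
 * passes.
 */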
5743
5744 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5745 dma_addr_t mapping, int len, u32 flags,
5746 u32 mss_and_is_end)
5747 {
5748 struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5749 int is_end = (mss_and_is_end & 0x1);
5750 u32 mss = (mss_and_is_end >> 1);
5751 u32 vlan_tag = 0;
5752
5753 if (is_end)
5754 flags |= TXD_FLAG_END;
5755 if (flags & TXD_FLAG_VLAN) {
5756 vlan_tag = flags >> 16;
5757 flags &= 0xffff;
5758 }
5759 vlan_tag |= (mss << TXD_MSS_SHIFT);
5760
5761 txd->addr_hi = ((u64) mapping >> 32);
5762 txd->addr_lo = ((u64) mapping & 0xffffffff);
5763 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5764 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
5765 }
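/* Packing example for the descriptor writer above: a final fragment
 * with mss = 1460 is passed as mss_and_is_end = (1460 << 1) | 1
 * = 2921, which unpacks to is_end = 1 (setting TXD_FLAG_END) and
 * mss = 1460, the latter folded into the descriptor's vlan_tag
 * word.
 */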
5766
5767 static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
5768 struct sk_buff *skb, int last)
5769 {
5770 int i;
5771 u32 entry = tnapi->tx_prod;
5772 struct ring_info *txb = &tnapi->tx_buffers[entry];
5773
5774 pci_unmap_single(tnapi->tp->pdev,
5775 dma_unmap_addr(txb, mapping),
5776 skb_headlen(skb),
5777 PCI_DMA_TODEVICE);
5778 for (i = 0; i < last; i++) {
5779 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5780
5781 entry = NEXT_TX(entry);
5782 txb = &tnapi->tx_buffers[entry];
5783
5784 pci_unmap_page(tnapi->tp->pdev,
5785 dma_unmap_addr(txb, mapping),
5786 frag->size, PCI_DMA_TODEVICE);
5787 }
5788 }
5789
5790 /* Workaround 4GB and 40-bit hardware DMA bugs. */
5791 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5792 struct sk_buff *skb,
5793 u32 base_flags, u32 mss)
5794 {
5795 struct tg3 *tp = tnapi->tp;
5796 struct sk_buff *new_skb;
5797 dma_addr_t new_addr = 0;
5798 u32 entry = tnapi->tx_prod;
5799 int ret = 0;
5800
5801 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5802 new_skb = skb_copy(skb, GFP_ATOMIC);
5803 else {
5804 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5805
5806 new_skb = skb_copy_expand(skb,
5807 skb_headroom(skb) + more_headroom,
5808 skb_tailroom(skb), GFP_ATOMIC);
5809 }
5810
5811 if (!new_skb) {
5812 ret = -1;
5813 } else {
5814 /* New SKB is guaranteed to be linear. */
5815 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5816 PCI_DMA_TODEVICE);
5817 /* Make sure the mapping succeeded */
5818 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5819 ret = -1;
5820 dev_kfree_skb(new_skb);
5821
5822 /* Make sure new skb does not cross any 4G boundaries.
5823 * Drop the packet if it does.
5824 */
5825 } else if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
5826 tg3_4g_overflow_test(new_addr, new_skb->len)) {
5827 pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5828 PCI_DMA_TODEVICE);
5829 ret = -1;
5830 dev_kfree_skb(new_skb);
5831 } else {
5832 tnapi->tx_buffers[entry].skb = new_skb;
5833 dma_unmap_addr_set(&tnapi->tx_buffers[entry],
5834 mapping, new_addr);
5835
5836 tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5837 base_flags, 1 | (mss << 1));
5838 }
5839 }
5840
5841 dev_kfree_skb(skb);
5842
5843 return ret;
5844 }
5845
5846 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
5847
5848 /* Use GSO to work around a rare TSO bug that may be triggered when the
5849 * TSO header is greater than 80 bytes.
5850 */
5851 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5852 {
5853 struct sk_buff *segs, *nskb;
5854 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5855
5856 /* Estimate the number of fragments in the worst case */
5857 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5858 netif_stop_queue(tp->dev);
5859
5860 /* netif_tx_stop_queue() must be done before checking
5861 * tx index in tg3_tx_avail() below, because in
5862 * tg3_tx(), we update tx index before checking for
5863 * netif_tx_queue_stopped().
5864 */
5865 smp_mb();
5866 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5867 return NETDEV_TX_BUSY;
5868
5869 netif_wake_queue(tp->dev);
5870 }
5871
5872 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5873 if (IS_ERR(segs))
5874 goto tg3_tso_bug_end;
5875
5876 do {
5877 nskb = segs;
5878 segs = segs->next;
5879 nskb->next = NULL;
5880 tg3_start_xmit(nskb, tp->dev);
5881 } while (segs);
5882
5883 tg3_tso_bug_end:
5884 dev_kfree_skb(skb);
5885
5886 return NETDEV_TX_OK;
5887 }
5888
5889 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5890 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
5891 */
5892 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
5893 {
5894 struct tg3 *tp = netdev_priv(dev);
5895 u32 len, entry, base_flags, mss;
5896 int i = -1, would_hit_hwbug;
5897 dma_addr_t mapping;
5898 struct tg3_napi *tnapi;
5899 struct netdev_queue *txq;
5900 unsigned int last;
5901
5902 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5903 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5904 if (tg3_flag(tp, ENABLE_TSS))
5905 tnapi++;
5906
5907 /* We are running in BH disabled context with netif_tx_lock
5908 * and TX reclaim runs via tp->napi.poll inside of a software
5909 * interrupt. Furthermore, IRQ processing runs lockless so we have
5910 * no IRQ context deadlocks to worry about either. Rejoice!
5911 */
5912 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5913 if (!netif_tx_queue_stopped(txq)) {
5914 netif_tx_stop_queue(txq);
5915
5916 /* This is a hard error, log it. */
5917 netdev_err(dev,
5918 "BUG! Tx Ring full when queue awake!\n");
5919 }
5920 return NETDEV_TX_BUSY;
5921 }
5922
5923 entry = tnapi->tx_prod;
5924 base_flags = 0;
5925 if (skb->ip_summed == CHECKSUM_PARTIAL)
5926 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5927
5928 mss = skb_shinfo(skb)->gso_size;
5929 if (mss) {
5930 struct iphdr *iph;
5931 u32 tcp_opt_len, hdr_len;
5932
5933 if (skb_header_cloned(skb) &&
5934 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5935 dev_kfree_skb(skb);
5936 goto out_unlock;
5937 }
5938
5939 iph = ip_hdr(skb);
5940 tcp_opt_len = tcp_optlen(skb);
5941
5942 if (skb_is_gso_v6(skb)) {
5943 hdr_len = skb_headlen(skb) - ETH_HLEN;
5944 } else {
5945 u32 ip_tcp_len;
5946
5947 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5948 hdr_len = ip_tcp_len + tcp_opt_len;
5949
5950 iph->check = 0;
5951 iph->tot_len = htons(mss + hdr_len);
5952 }
5953
5954 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5955 tg3_flag(tp, TSO_BUG))
5956 return tg3_tso_bug(tp, skb);
5957
5958 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5959 TXD_FLAG_CPU_POST_DMA);
5960
5961 if (tg3_flag(tp, HW_TSO_1) ||
5962 tg3_flag(tp, HW_TSO_2) ||
5963 tg3_flag(tp, HW_TSO_3)) {
5964 tcp_hdr(skb)->check = 0;
5965 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5966 } else
5967 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5968 iph->daddr, 0,
5969 IPPROTO_TCP,
5970 0);
5971
5972 if (tg3_flag(tp, HW_TSO_3)) {
5973 mss |= (hdr_len & 0xc) << 12;
5974 if (hdr_len & 0x10)
5975 base_flags |= 0x00000010;
5976 base_flags |= (hdr_len & 0x3e0) << 5;
5977 } else if (tg3_flag(tp, HW_TSO_2))
5978 mss |= hdr_len << 9;
5979 else if (tg3_flag(tp, HW_TSO_1) ||
5980 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5981 if (tcp_opt_len || iph->ihl > 5) {
5982 int tsflags;
5983
5984 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5985 mss |= (tsflags << 11);
5986 }
5987 } else {
5988 if (tcp_opt_len || iph->ihl > 5) {
5989 int tsflags;
5990
5991 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5992 base_flags |= tsflags << 12;
5993 }
5994 }
5995 }
5996
5997 if (vlan_tx_tag_present(skb))
5998 base_flags |= (TXD_FLAG_VLAN |
5999 (vlan_tx_tag_get(skb) << 16));
6000
6001 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6002 !mss && skb->len > VLAN_ETH_FRAME_LEN)
6003 base_flags |= TXD_FLAG_JMB_PKT;
6004
6005 len = skb_headlen(skb);
6006
6007 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6008 if (pci_dma_mapping_error(tp->pdev, mapping)) {
6009 dev_kfree_skb(skb);
6010 goto out_unlock;
6011 }
6012
6013 tnapi->tx_buffers[entry].skb = skb;
6014 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6015
6016 would_hit_hwbug = 0;
6017
6018 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6019 would_hit_hwbug = 1;
6020
6021 if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
6022 tg3_4g_overflow_test(mapping, len))
6023 would_hit_hwbug = 1;
6024
6025 if (tg3_40bit_overflow_test(tp, mapping, len))
6026 would_hit_hwbug = 1;
6027
6028 if (tg3_flag(tp, 5701_DMA_BUG))
6029 would_hit_hwbug = 1;
6030
6031 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
6032 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
6033
6034 entry = NEXT_TX(entry);
6035
6036 /* Now loop through additional data fragments, and queue them. */
6037 if (skb_shinfo(skb)->nr_frags > 0) {
6038 last = skb_shinfo(skb)->nr_frags - 1;
6039 for (i = 0; i <= last; i++) {
6040 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6041
6042 len = frag->size;
6043 mapping = pci_map_page(tp->pdev,
6044 frag->page,
6045 frag->page_offset,
6046 len, PCI_DMA_TODEVICE);
6047
6048 tnapi->tx_buffers[entry].skb = NULL;
6049 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6050 mapping);
6051 if (pci_dma_mapping_error(tp->pdev, mapping))
6052 goto dma_error;
6053
6054 if (tg3_flag(tp, SHORT_DMA_BUG) &&
6055 len <= 8)
6056 would_hit_hwbug = 1;
6057
6058 if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
6059 tg3_4g_overflow_test(mapping, len))
6060 would_hit_hwbug = 1;
6061
6062 if (tg3_40bit_overflow_test(tp, mapping, len))
6063 would_hit_hwbug = 1;
6064
6065 if (tg3_flag(tp, HW_TSO_1) ||
6066 tg3_flag(tp, HW_TSO_2) ||
6067 tg3_flag(tp, HW_TSO_3))
6068 tg3_set_txd(tnapi, entry, mapping, len,
6069 base_flags, (i == last)|(mss << 1));
6070 else
6071 tg3_set_txd(tnapi, entry, mapping, len,
6072 base_flags, (i == last));
6073
6074 entry = NEXT_TX(entry);
6075 }
6076 }
6077
6078 if (would_hit_hwbug) {
6079 tg3_skb_error_unmap(tnapi, skb, i);
6080
6081 /* If the workaround fails due to memory/mapping
6082 * failure, silently drop this packet.
6083 */
6084 if (tigon3_dma_hwbug_workaround(tnapi, skb, base_flags, mss))
6085 goto out_unlock;
6086
6087 entry = NEXT_TX(tnapi->tx_prod);
6088 }
6089
6090 /* Packets are ready, update Tx producer idx local and on card. */
6091 tw32_tx_mbox(tnapi->prodmbox, entry);
6092
6093 skb_tx_timestamp(skb);
6094
6095 tnapi->tx_prod = entry;
6096 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6097 netif_tx_stop_queue(txq);
6098
6099 /* netif_tx_stop_queue() must be done before checking
6100 * tx index in tg3_tx_avail() below, because in
6101 * tg3_tx(), we update tx index before checking for
6102 * netif_tx_queue_stopped().
6103 */
6104 smp_mb();
6105 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6106 netif_tx_wake_queue(txq);
6107 }
6108
6109 out_unlock:
6110 mmiowb();
6111
6112 return NETDEV_TX_OK;
6113
6114 dma_error:
6115 tg3_skb_error_unmap(tnapi, skb, i);
6116 dev_kfree_skb(skb);
6117 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6118 return NETDEV_TX_OK;
6119 }
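
/* A sketch of the stop/wake handshake used above (the consumer side
 * lives in tg3_tx(), earlier in this file):
 *
 *	producer (tg3_start_xmit)	consumer (tg3_tx)
 *	-------------------------	-----------------
 *	netif_tx_stop_queue(txq)	advance tx index
 *	smp_mb()			smp_mb()
 *	recheck tg3_tx_avail(),		if queue stopped and ring
 *	wake if above threshold		drained enough, wake it
 *
 * Without the paired barriers, each side could read a stale value
 * from the other and leave the queue stopped indefinitely.
 */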
6120
6121 static void tg3_set_loopback(struct net_device *dev, u32 features)
6122 {
6123 struct tg3 *tp = netdev_priv(dev);
6124
6125 if (features & NETIF_F_LOOPBACK) {
6126 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6127 return;
6128
6129 /*
6130 * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
6131 * loopback mode if Half-Duplex mode was negotiated earlier.
6132 */
6133 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
6134
6135 /* Enable internal MAC loopback mode */
6136 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6137 spin_lock_bh(&tp->lock);
6138 tw32(MAC_MODE, tp->mac_mode);
6139 netif_carrier_on(tp->dev);
6140 spin_unlock_bh(&tp->lock);
6141 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6142 } else {
6143 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6144 return;
6145
6146 /* Disable internal MAC loopback mode */
6147 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6148 spin_lock_bh(&tp->lock);
6149 tw32(MAC_MODE, tp->mac_mode);
6150 /* Force link status check */
6151 tg3_setup_phy(tp, 1);
6152 spin_unlock_bh(&tp->lock);
6153 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6154 }
6155 }
6156
6157 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6158 {
6159 struct tg3 *tp = netdev_priv(dev);
6160
6161 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6162 features &= ~NETIF_F_ALL_TSO;
6163
6164 return features;
6165 }
6166
6167 static int tg3_set_features(struct net_device *dev, u32 features)
6168 {
6169 u32 changed = dev->features ^ features;
6170
6171 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6172 tg3_set_loopback(dev, features);
6173
6174 return 0;
6175 }
6176
6177 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6178 int new_mtu)
6179 {
6180 dev->mtu = new_mtu;
6181
6182 if (new_mtu > ETH_DATA_LEN) {
6183 if (tg3_flag(tp, 5780_CLASS)) {
6184 netdev_update_features(dev);
6185 tg3_flag_clear(tp, TSO_CAPABLE);
6186 } else {
6187 tg3_flag_set(tp, JUMBO_RING_ENABLE);
6188 }
6189 } else {
6190 if (tg3_flag(tp, 5780_CLASS)) {
6191 tg3_flag_set(tp, TSO_CAPABLE);
6192 netdev_update_features(dev);
6193 }
6194 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
6195 }
6196 }
6197
6198 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6199 {
6200 struct tg3 *tp = netdev_priv(dev);
6201 int err;
6202
6203 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6204 return -EINVAL;
6205
6206 if (!netif_running(dev)) {
6207 /* We'll just catch it later when the
6208 * device is brought up.
6209 */
6210 tg3_set_mtu(dev, tp, new_mtu);
6211 return 0;
6212 }
6213
6214 tg3_phy_stop(tp);
6215
6216 tg3_netif_stop(tp);
6217
6218 tg3_full_lock(tp, 1);
6219
6220 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6221
6222 tg3_set_mtu(dev, tp, new_mtu);
6223
6224 err = tg3_restart_hw(tp, 0);
6225
6226 if (!err)
6227 tg3_netif_start(tp);
6228
6229 tg3_full_unlock(tp);
6230
6231 if (!err)
6232 tg3_phy_start(tp);
6233
6234 return err;
6235 }
6236
6237 static void tg3_rx_prodring_free(struct tg3 *tp,
6238 struct tg3_rx_prodring_set *tpr)
6239 {
6240 int i;
6241
6242 if (tpr != &tp->napi[0].prodring) {
6243 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6244 i = (i + 1) & tp->rx_std_ring_mask)
6245 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6246 tp->rx_pkt_map_sz);
6247
6248 if (tg3_flag(tp, JUMBO_CAPABLE)) {
6249 for (i = tpr->rx_jmb_cons_idx;
6250 i != tpr->rx_jmb_prod_idx;
6251 i = (i + 1) & tp->rx_jmb_ring_mask) {
6252 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6253 TG3_RX_JMB_MAP_SZ);
6254 }
6255 }
6256
6257 return;
6258 }
6259
6260 for (i = 0; i <= tp->rx_std_ring_mask; i++)
6261 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6262 tp->rx_pkt_map_sz);
6263
6264 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6265 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6266 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6267 TG3_RX_JMB_MAP_SZ);
6268 }
6269 }
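
/* Worked example (illustrative only): the cons-to-prod walks above
 * rely on power-of-2 ring sizes. With rx_std_ring_mask = 511, index
 * 511 advances to (511 + 1) & 511 = 0, wrapping without a divide.
 */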
6270
6271 /* Initialize rx rings for packet processing.
6272 *
6273 * The chip has been shut down and the driver detached from
6274 * the networking stack, so no interrupts or new tx packets will
6275 * end up in the driver. tp->{tx,}lock are held and thus
6276 * we may not sleep.
6277 */
6278 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6279 struct tg3_rx_prodring_set *tpr)
6280 {
6281 u32 i, rx_pkt_dma_sz;
6282
6283 tpr->rx_std_cons_idx = 0;
6284 tpr->rx_std_prod_idx = 0;
6285 tpr->rx_jmb_cons_idx = 0;
6286 tpr->rx_jmb_prod_idx = 0;
6287
6288 if (tpr != &tp->napi[0].prodring) {
6289 memset(&tpr->rx_std_buffers[0], 0,
6290 TG3_RX_STD_BUFF_RING_SIZE(tp));
6291 if (tpr->rx_jmb_buffers)
6292 memset(&tpr->rx_jmb_buffers[0], 0,
6293 TG3_RX_JMB_BUFF_RING_SIZE(tp));
6294 goto done;
6295 }
6296
6297 /* Zero out all descriptors. */
6298 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
6299
6300 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6301 if (tg3_flag(tp, 5780_CLASS) &&
6302 tp->dev->mtu > ETH_DATA_LEN)
6303 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6304 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6305
6306 /* Initialize invariants of the rings; we only set this
6307 * stuff once. This works because the card does not
6308 * write into the rx buffer posting rings.
6309 */
6310 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
6311 struct tg3_rx_buffer_desc *rxd;
6312
6313 rxd = &tpr->rx_std[i];
6314 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6315 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6316 rxd->opaque = (RXD_OPAQUE_RING_STD |
6317 (i << RXD_OPAQUE_INDEX_SHIFT));
6318 }
6319
6320 /* Now allocate fresh SKBs for each rx ring. */
6321 for (i = 0; i < tp->rx_pending; i++) {
6322 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6323 netdev_warn(tp->dev,
6324 "Using a smaller RX standard ring. Only "
6325 "%d out of %d buffers were allocated "
6326 "successfully\n", i, tp->rx_pending);
6327 if (i == 0)
6328 goto initfail;
6329 tp->rx_pending = i;
6330 break;
6331 }
6332 }
6333
6334 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
6335 goto done;
6336
6337 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
6338
6339 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
6340 goto done;
6341
6342 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
6343 struct tg3_rx_buffer_desc *rxd;
6344
6345 rxd = &tpr->rx_jmb[i].std;
6346 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6347 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6348 RXD_FLAG_JUMBO;
6349 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6350 (i << RXD_OPAQUE_INDEX_SHIFT));
6351 }
6352
6353 for (i = 0; i < tp->rx_jumbo_pending; i++) {
6354 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6355 netdev_warn(tp->dev,
6356 "Using a smaller RX jumbo ring. Only %d "
6357 "out of %d buffers were allocated "
6358 "successfully\n", i, tp->rx_jumbo_pending);
6359 if (i == 0)
6360 goto initfail;
6361 tp->rx_jumbo_pending = i;
6362 break;
6363 }
6364 }
6365
6366 done:
6367 return 0;
6368
6369 initfail:
6370 tg3_rx_prodring_free(tp, tpr);
6371 return -ENOMEM;
6372 }
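
/* The opaque cookie programmed above is how the completion path finds
 * the buffer again. A sketch of the decode, assuming the usual
 * RXD_OPAQUE_* masks from tg3.h:
 *
 *	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
 *	desc_idx   = desc->opaque & RXD_OPAQUE_INDEX_MASK;
 */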
6373
6374 static void tg3_rx_prodring_fini(struct tg3 *tp,
6375 struct tg3_rx_prodring_set *tpr)
6376 {
6377 kfree(tpr->rx_std_buffers);
6378 tpr->rx_std_buffers = NULL;
6379 kfree(tpr->rx_jmb_buffers);
6380 tpr->rx_jmb_buffers = NULL;
6381 if (tpr->rx_std) {
6382 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6383 tpr->rx_std, tpr->rx_std_mapping);
6384 tpr->rx_std = NULL;
6385 }
6386 if (tpr->rx_jmb) {
6387 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6388 tpr->rx_jmb, tpr->rx_jmb_mapping);
6389 tpr->rx_jmb = NULL;
6390 }
6391 }
6392
6393 static int tg3_rx_prodring_init(struct tg3 *tp,
6394 struct tg3_rx_prodring_set *tpr)
6395 {
6396 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6397 GFP_KERNEL);
6398 if (!tpr->rx_std_buffers)
6399 return -ENOMEM;
6400
6401 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6402 TG3_RX_STD_RING_BYTES(tp),
6403 &tpr->rx_std_mapping,
6404 GFP_KERNEL);
6405 if (!tpr->rx_std)
6406 goto err_out;
6407
6408 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6409 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6410 GFP_KERNEL);
6411 if (!tpr->rx_jmb_buffers)
6412 goto err_out;
6413
6414 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6415 TG3_RX_JMB_RING_BYTES(tp),
6416 &tpr->rx_jmb_mapping,
6417 GFP_KERNEL);
6418 if (!tpr->rx_jmb)
6419 goto err_out;
6420 }
6421
6422 return 0;
6423
6424 err_out:
6425 tg3_rx_prodring_fini(tp, tpr);
6426 return -ENOMEM;
6427 }
6428
6429 /* Free up pending packets in all rx/tx rings.
6430 *
6431 * The chip has been shut down and the driver detached from
6432 * the networking stack, so no interrupts or new tx packets will
6433 * end up in the driver. tp->{tx,}lock is not held and we are not
6434 * in an interrupt context and thus may sleep.
6435 */
6436 static void tg3_free_rings(struct tg3 *tp)
6437 {
6438 int i, j;
6439
6440 for (j = 0; j < tp->irq_cnt; j++) {
6441 struct tg3_napi *tnapi = &tp->napi[j];
6442
6443 tg3_rx_prodring_free(tp, &tnapi->prodring);
6444
6445 if (!tnapi->tx_buffers)
6446 continue;
6447
6448 for (i = 0; i < TG3_TX_RING_SIZE; ) {
6449 struct ring_info *txp;
6450 struct sk_buff *skb;
6451 unsigned int k;
6452
6453 txp = &tnapi->tx_buffers[i];
6454 skb = txp->skb;
6455
6456 if (skb == NULL) {
6457 i++;
6458 continue;
6459 }
6460
6461 pci_unmap_single(tp->pdev,
6462 dma_unmap_addr(txp, mapping),
6463 skb_headlen(skb),
6464 PCI_DMA_TODEVICE);
6465 txp->skb = NULL;
6466
6467 i++;
6468
6469 for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6470 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6471 pci_unmap_page(tp->pdev,
6472 dma_unmap_addr(txp, mapping),
6473 skb_shinfo(skb)->frags[k].size,
6474 PCI_DMA_TODEVICE);
6475 i++;
6476 }
6477
6478 dev_kfree_skb_any(skb);
6479 }
6480 }
6481 }
6482
6483 /* Initialize tx/rx rings for packet processing.
6484 *
6485 * The chip has been shut down and the driver detached from
6486 * the networking stack, so no interrupts or new tx packets will
6487 * end up in the driver. tp->{tx,}lock are held and thus
6488 * we may not sleep.
6489 */
6490 static int tg3_init_rings(struct tg3 *tp)
6491 {
6492 int i;
6493
6494 /* Free up all the SKBs. */
6495 tg3_free_rings(tp);
6496
6497 for (i = 0; i < tp->irq_cnt; i++) {
6498 struct tg3_napi *tnapi = &tp->napi[i];
6499
6500 tnapi->last_tag = 0;
6501 tnapi->last_irq_tag = 0;
6502 tnapi->hw_status->status = 0;
6503 tnapi->hw_status->status_tag = 0;
6504 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6505
6506 tnapi->tx_prod = 0;
6507 tnapi->tx_cons = 0;
6508 if (tnapi->tx_ring)
6509 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6510
6511 tnapi->rx_rcb_ptr = 0;
6512 if (tnapi->rx_rcb)
6513 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6514
6515 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6516 tg3_free_rings(tp);
6517 return -ENOMEM;
6518 }
6519 }
6520
6521 return 0;
6522 }
6523
6524 /*
6525 * Must not be invoked with interrupt sources disabled and
6526 * the hardware shut down.
6527 */
6528 static void tg3_free_consistent(struct tg3 *tp)
6529 {
6530 int i;
6531
6532 for (i = 0; i < tp->irq_cnt; i++) {
6533 struct tg3_napi *tnapi = &tp->napi[i];
6534
6535 if (tnapi->tx_ring) {
6536 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6537 tnapi->tx_ring, tnapi->tx_desc_mapping);
6538 tnapi->tx_ring = NULL;
6539 }
6540
6541 kfree(tnapi->tx_buffers);
6542 tnapi->tx_buffers = NULL;
6543
6544 if (tnapi->rx_rcb) {
6545 dma_free_coherent(&tp->pdev->dev,
6546 TG3_RX_RCB_RING_BYTES(tp),
6547 tnapi->rx_rcb,
6548 tnapi->rx_rcb_mapping);
6549 tnapi->rx_rcb = NULL;
6550 }
6551
6552 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6553
6554 if (tnapi->hw_status) {
6555 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6556 tnapi->hw_status,
6557 tnapi->status_mapping);
6558 tnapi->hw_status = NULL;
6559 }
6560 }
6561
6562 if (tp->hw_stats) {
6563 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6564 tp->hw_stats, tp->stats_mapping);
6565 tp->hw_stats = NULL;
6566 }
6567 }
6568
6569 /*
6570 * Must not be invoked with interrupt sources disabled and
6571 * the hardware shut down. Can sleep.
6572 */
6573 static int tg3_alloc_consistent(struct tg3 *tp)
6574 {
6575 int i;
6576
6577 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6578 sizeof(struct tg3_hw_stats),
6579 &tp->stats_mapping,
6580 GFP_KERNEL);
6581 if (!tp->hw_stats)
6582 goto err_out;
6583
6584 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6585
6586 for (i = 0; i < tp->irq_cnt; i++) {
6587 struct tg3_napi *tnapi = &tp->napi[i];
6588 struct tg3_hw_status *sblk;
6589
6590 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6591 TG3_HW_STATUS_SIZE,
6592 &tnapi->status_mapping,
6593 GFP_KERNEL);
6594 if (!tnapi->hw_status)
6595 goto err_out;
6596
6597 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6598 sblk = tnapi->hw_status;
6599
6600 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6601 goto err_out;
6602
6603 /* If multivector TSS is enabled, vector 0 does not handle
6604 * tx interrupts. Don't allocate any resources for it.
6605 */
6606 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
6607 (i && tg3_flag(tp, ENABLE_TSS))) {
6608 tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
6609 TG3_TX_RING_SIZE,
6610 GFP_KERNEL);
6611 if (!tnapi->tx_buffers)
6612 goto err_out;
6613
6614 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6615 TG3_TX_RING_BYTES,
6616 &tnapi->tx_desc_mapping,
6617 GFP_KERNEL);
6618 if (!tnapi->tx_ring)
6619 goto err_out;
6620 }
6621
6622 /*
6623 * When RSS is enabled, the status block format changes
6624 * slightly. The "rx_jumbo_consumer", "reserved",
6625 * and "rx_mini_consumer" members get mapped to the
6626 * other three rx return ring producer indexes.
6627 */
6628 switch (i) {
6629 default:
6630 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6631 break;
6632 case 2:
6633 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6634 break;
6635 case 3:
6636 tnapi->rx_rcb_prod_idx = &sblk->reserved;
6637 break;
6638 case 4:
6639 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6640 break;
6641 }
6642
6643 /*
6644 * If multivector RSS is enabled, vector 0 does not handle
6645 * rx or tx interrupts. Don't allocate any resources for it.
6646 */
6647 if (!i && tg3_flag(tp, ENABLE_RSS))
6648 continue;
6649
6650 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6651 TG3_RX_RCB_RING_BYTES(tp),
6652 &tnapi->rx_rcb_mapping,
6653 GFP_KERNEL);
6654 if (!tnapi->rx_rcb)
6655 goto err_out;
6656
6657 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6658 }
6659
6660 return 0;
6661
6662 err_out:
6663 tg3_free_consistent(tp);
6664 return -ENOMEM;
6665 }
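
/* Summary of the rx return ring producer mapping set up above; with
 * RSS, vectors 2-4 reuse status block members that the hardware
 * redefines:
 *
 *	vector		status block member
 *	0, 1 (default)	idx[0].rx_producer
 *	2		rx_jumbo_consumer
 *	3		reserved
 *	4		rx_mini_consumer
 */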
6666
6667 #define MAX_WAIT_CNT 1000
6668
6669 /* To stop a block, clear the enable bit and poll till it
6670 * clears. tp->lock is held.
6671 */
6672 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6673 {
6674 unsigned int i;
6675 u32 val;
6676
6677 if (tg3_flag(tp, 5705_PLUS)) {
6678 switch (ofs) {
6679 case RCVLSC_MODE:
6680 case DMAC_MODE:
6681 case MBFREE_MODE:
6682 case BUFMGR_MODE:
6683 case MEMARB_MODE:
6684 /* We can't enable/disable these bits of the
6685 * 5705/5750; just say success.
6686 */
6687 return 0;
6688
6689 default:
6690 break;
6691 }
6692 }
6693
6694 val = tr32(ofs);
6695 val &= ~enable_bit;
6696 tw32_f(ofs, val);
6697
6698 for (i = 0; i < MAX_WAIT_CNT; i++) {
6699 udelay(100);
6700 val = tr32(ofs);
6701 if ((val & enable_bit) == 0)
6702 break;
6703 }
6704
6705 if (i == MAX_WAIT_CNT && !silent) {
6706 dev_err(&tp->pdev->dev,
6707 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6708 ofs, enable_bit);
6709 return -ENODEV;
6710 }
6711
6712 return 0;
6713 }
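
/* Worked timing (illustrative only): MAX_WAIT_CNT polls of
 * udelay(100) give tg3_stop_block() an overall timeout of
 * 1000 * 100us = 100ms before it reports -ENODEV.
 */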
6714
6715 /* tp->lock is held. */
6716 static int tg3_abort_hw(struct tg3 *tp, int silent)
6717 {
6718 int i, err;
6719
6720 tg3_disable_ints(tp);
6721
6722 tp->rx_mode &= ~RX_MODE_ENABLE;
6723 tw32_f(MAC_RX_MODE, tp->rx_mode);
6724 udelay(10);
6725
6726 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6727 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6728 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6729 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6730 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6731 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6732
6733 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6734 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6735 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6736 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6737 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6738 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6739 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6740
6741 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6742 tw32_f(MAC_MODE, tp->mac_mode);
6743 udelay(40);
6744
6745 tp->tx_mode &= ~TX_MODE_ENABLE;
6746 tw32_f(MAC_TX_MODE, tp->tx_mode);
6747
6748 for (i = 0; i < MAX_WAIT_CNT; i++) {
6749 udelay(100);
6750 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6751 break;
6752 }
6753 if (i >= MAX_WAIT_CNT) {
6754 dev_err(&tp->pdev->dev,
6755 "%s timed out, TX_MODE_ENABLE will not clear "
6756 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6757 err |= -ENODEV;
6758 }
6759
6760 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6761 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6762 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6763
6764 tw32(FTQ_RESET, 0xffffffff);
6765 tw32(FTQ_RESET, 0x00000000);
6766
6767 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6768 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6769
6770 for (i = 0; i < tp->irq_cnt; i++) {
6771 struct tg3_napi *tnapi = &tp->napi[i];
6772 if (tnapi->hw_status)
6773 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6774 }
6775 if (tp->hw_stats)
6776 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6777
6778 return err;
6779 }
6780
6781 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6782 {
6783 int i;
6784 u32 apedata;
6785
6786 /* NCSI does not support APE events */
6787 if (tg3_flag(tp, APE_HAS_NCSI))
6788 return;
6789
6790 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6791 if (apedata != APE_SEG_SIG_MAGIC)
6792 return;
6793
6794 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6795 if (!(apedata & APE_FW_STATUS_READY))
6796 return;
6797
6798 /* Wait for up to 1 millisecond for APE to service previous event. */
6799 for (i = 0; i < 10; i++) {
6800 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6801 return;
6802
6803 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6804
6805 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6806 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6807 event | APE_EVENT_STATUS_EVENT_PENDING);
6808
6809 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6810
6811 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6812 break;
6813
6814 udelay(100);
6815 }
6816
6817 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6818 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6819 }
6820
6821 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6822 {
6823 u32 event;
6824 u32 apedata;
6825
6826 if (!tg3_flag(tp, ENABLE_APE))
6827 return;
6828
6829 switch (kind) {
6830 case RESET_KIND_INIT:
6831 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6832 APE_HOST_SEG_SIG_MAGIC);
6833 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6834 APE_HOST_SEG_LEN_MAGIC);
6835 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6836 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6837 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6838 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
6839 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6840 APE_HOST_BEHAV_NO_PHYLOCK);
6841 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
6842 TG3_APE_HOST_DRVR_STATE_START);
6843
6844 event = APE_EVENT_STATUS_STATE_START;
6845 break;
6846 case RESET_KIND_SHUTDOWN:
6847 /* With the interface we are currently using,
6848 * APE does not track driver state. Wiping
6849 * out the HOST SEGMENT SIGNATURE forces
6850 * the APE to assume OS absent status.
6851 */
6852 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6853
6854 if (device_may_wakeup(&tp->pdev->dev) &&
6855 tg3_flag(tp, WOL_ENABLE)) {
6856 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
6857 TG3_APE_HOST_WOL_SPEED_AUTO);
6858 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
6859 } else
6860 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
6861
6862 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
6863
6864 event = APE_EVENT_STATUS_STATE_UNLOAD;
6865 break;
6866 case RESET_KIND_SUSPEND:
6867 event = APE_EVENT_STATUS_STATE_SUSPEND;
6868 break;
6869 default:
6870 return;
6871 }
6872
6873 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
6874
6875 tg3_ape_send_event(tp, event);
6876 }
6877
6878 /* tp->lock is held. */
6879 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6880 {
6881 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6882 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6883
6884 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
6885 switch (kind) {
6886 case RESET_KIND_INIT:
6887 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6888 DRV_STATE_START);
6889 break;
6890
6891 case RESET_KIND_SHUTDOWN:
6892 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6893 DRV_STATE_UNLOAD);
6894 break;
6895
6896 case RESET_KIND_SUSPEND:
6897 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6898 DRV_STATE_SUSPEND);
6899 break;
6900
6901 default:
6902 break;
6903 }
6904 }
6905
6906 if (kind == RESET_KIND_INIT ||
6907 kind == RESET_KIND_SUSPEND)
6908 tg3_ape_driver_state_change(tp, kind);
6909 }
6910
6911 /* tp->lock is held. */
6912 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
6913 {
6914 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
6915 switch (kind) {
6916 case RESET_KIND_INIT:
6917 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6918 DRV_STATE_START_DONE);
6919 break;
6920
6921 case RESET_KIND_SHUTDOWN:
6922 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6923 DRV_STATE_UNLOAD_DONE);
6924 break;
6925
6926 default:
6927 break;
6928 }
6929 }
6930
6931 if (kind == RESET_KIND_SHUTDOWN)
6932 tg3_ape_driver_state_change(tp, kind);
6933 }
6934
6935 /* tp->lock is held. */
6936 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
6937 {
6938 if (tg3_flag(tp, ENABLE_ASF)) {
6939 switch (kind) {
6940 case RESET_KIND_INIT:
6941 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6942 DRV_STATE_START);
6943 break;
6944
6945 case RESET_KIND_SHUTDOWN:
6946 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6947 DRV_STATE_UNLOAD);
6948 break;
6949
6950 case RESET_KIND_SUSPEND:
6951 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6952 DRV_STATE_SUSPEND);
6953 break;
6954
6955 default:
6956 break;
6957 }
6958 }
6959 }
6960
6961 static int tg3_poll_fw(struct tg3 *tp)
6962 {
6963 int i;
6964 u32 val;
6965
6966 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6967 /* Wait up to 20ms for init done. */
6968 for (i = 0; i < 200; i++) {
6969 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
6970 return 0;
6971 udelay(100);
6972 }
6973 return -ENODEV;
6974 }
6975
6976 /* Wait for firmware initialization to complete. */
6977 for (i = 0; i < 100000; i++) {
6978 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
6979 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
6980 break;
6981 udelay(10);
6982 }
6983
6984 /* Chip might not be fitted with firmware. Some Sun onboard
6985 * parts are configured like that. So don't signal the timeout
6986 * of the above loop as an error, but do report the lack of
6987 * running firmware once.
6988 */
6989 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
6990 tg3_flag_set(tp, NO_FWARE_REPORTED);
6991
6992 netdev_info(tp->dev, "No firmware running\n");
6993 }
6994
6995 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
6996 /* The 57765 A0 needs a little more
6997 * time to do some important work.
6998 */
6999 mdelay(10);
7000 }
7001
7002 return 0;
7003 }
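
/* The mailbox handshake polled above, in short:
 * tg3_write_sig_pre_reset() above writes NIC_SRAM_FIRMWARE_MBOX_MAGIC1
 * into the mailbox before the reset, and the bootcode writes back its
 * one's complement (~MAGIC1) when initialization finishes. The loop
 * polls every 10us up to 100000 times, i.e. roughly a one second
 * timeout.
 */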
7004
7005 /* Save PCI command register before chip reset */
7006 static void tg3_save_pci_state(struct tg3 *tp)
7007 {
7008 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7009 }
7010
7011 /* Restore PCI state after chip reset */
7012 static void tg3_restore_pci_state(struct tg3 *tp)
7013 {
7014 u32 val;
7015
7016 /* Re-enable indirect register accesses. */
7017 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7018 tp->misc_host_ctrl);
7019
7020 /* Set MAX PCI retry to zero. */
7021 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7022 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7023 tg3_flag(tp, PCIX_MODE))
7024 val |= PCISTATE_RETRY_SAME_DMA;
7025 /* Allow reads and writes to the APE register and memory space. */
7026 if (tg3_flag(tp, ENABLE_APE))
7027 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7028 PCISTATE_ALLOW_APE_SHMEM_WR |
7029 PCISTATE_ALLOW_APE_PSPACE_WR;
7030 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7031
7032 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7033
7034 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7035 if (tg3_flag(tp, PCI_EXPRESS))
7036 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7037 else {
7038 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7039 tp->pci_cacheline_sz);
7040 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7041 tp->pci_lat_timer);
7042 }
7043 }
7044
7045 /* Make sure PCI-X relaxed ordering bit is clear. */
7046 if (tg3_flag(tp, PCIX_MODE)) {
7047 u16 pcix_cmd;
7048
7049 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7050 &pcix_cmd);
7051 pcix_cmd &= ~PCI_X_CMD_ERO;
7052 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7053 pcix_cmd);
7054 }
7055
7056 if (tg3_flag(tp, 5780_CLASS)) {
7057
7058 /* Chip reset on 5780 will reset MSI enable bit,
7059 * so we need to restore it.
7060 */
7061 if (tg3_flag(tp, USING_MSI)) {
7062 u16 ctrl;
7063
7064 pci_read_config_word(tp->pdev,
7065 tp->msi_cap + PCI_MSI_FLAGS,
7066 &ctrl);
7067 pci_write_config_word(tp->pdev,
7068 tp->msi_cap + PCI_MSI_FLAGS,
7069 ctrl | PCI_MSI_FLAGS_ENABLE);
7070 val = tr32(MSGINT_MODE);
7071 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7072 }
7073 }
7074 }
7075
7076 static void tg3_stop_fw(struct tg3 *);
7077
7078 /* tp->lock is held. */
7079 static int tg3_chip_reset(struct tg3 *tp)
7080 {
7081 u32 val;
7082 void (*write_op)(struct tg3 *, u32, u32);
7083 int i, err;
7084
7085 tg3_nvram_lock(tp);
7086
7087 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7088
7089 /* No matching tg3_nvram_unlock() after this because
7090 * chip reset below will undo the nvram lock.
7091 */
7092 tp->nvram_lock_cnt = 0;
7093
7094 /* GRC_MISC_CFG core clock reset will clear the memory
7095 * enable bit in PCI register 4 and the MSI enable bit
7096 * on some chips, so we save relevant registers here.
7097 */
7098 tg3_save_pci_state(tp);
7099
7100 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7101 tg3_flag(tp, 5755_PLUS))
7102 tw32(GRC_FASTBOOT_PC, 0);
7103
7104 /*
7105 * We must avoid the readl() that normally takes place.
7106 * It locks machines, causes machine checks, and other
7107 * fun things. So, temporarily disable the 5701
7108 * hardware workaround, while we do the reset.
7109 */
7110 write_op = tp->write32;
7111 if (write_op == tg3_write_flush_reg32)
7112 tp->write32 = tg3_write32;
7113
7114 /* Prevent the irq handler from reading or writing PCI registers
7115 * during chip reset when the memory enable bit in the PCI command
7116 * register may be cleared. The chip does not generate interrupt
7117 * at this time, but the irq handler may still be called due to irq
7118 * sharing or irqpoll.
7119 */
7120 tg3_flag_set(tp, CHIP_RESETTING);
7121 for (i = 0; i < tp->irq_cnt; i++) {
7122 struct tg3_napi *tnapi = &tp->napi[i];
7123 if (tnapi->hw_status) {
7124 tnapi->hw_status->status = 0;
7125 tnapi->hw_status->status_tag = 0;
7126 }
7127 tnapi->last_tag = 0;
7128 tnapi->last_irq_tag = 0;
7129 }
7130 smp_mb();
7131
7132 for (i = 0; i < tp->irq_cnt; i++)
7133 synchronize_irq(tp->napi[i].irq_vec);
7134
7135 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7136 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7137 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7138 }
7139
7140 /* do the reset */
7141 val = GRC_MISC_CFG_CORECLK_RESET;
7142
7143 if (tg3_flag(tp, PCI_EXPRESS)) {
7144 /* Force PCIe 1.0a mode */
7145 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7146 !tg3_flag(tp, 57765_PLUS) &&
7147 tr32(TG3_PCIE_PHY_TSTCTL) ==
7148 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7149 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7150
7151 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7152 tw32(GRC_MISC_CFG, (1 << 29));
7153 val |= (1 << 29);
7154 }
7155 }
7156
7157 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7158 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7159 tw32(GRC_VCPU_EXT_CTRL,
7160 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7161 }
7162
7163 /* Manage gphy power for all CPMU absent PCIe devices. */
7164 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7165 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7166
7167 tw32(GRC_MISC_CFG, val);
7168
7169 /* restore 5701 hardware bug workaround write method */
7170 tp->write32 = write_op;
7171
7172 /* Unfortunately, we have to delay before the PCI read back.
7173 * Some 575X chips will not even respond to a PCI cfg access
7174 * when the reset command is given to the chip.
7175 *
7176 * How do these hardware designers expect things to work
7177 * properly if the PCI write is posted for a long period
7178 * of time? It is always necessary to have some method by
7179 * which a register read back can occur to push out the write
7180 * that performs the reset.
7181 *
7182 * For most tg3 variants the trick below was working.
7183 * Ho hum...
7184 */
7185 udelay(120);
7186
7187 /* Flush PCI posted writes. The normal MMIO registers
7188 * are inaccessible at this time so this is the only
7189 * way to make this reliably (actually, this is no longer
7190 * the case, see above). I tried to use indirect
7191 * register read/write but this upset some 5701 variants.
7192 */
7193 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7194
7195 udelay(120);
7196
7197 if (tg3_flag(tp, PCI_EXPRESS) && tp->pcie_cap) {
7198 u16 val16;
7199
7200 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7201 int i;
7202 u32 cfg_val;
7203
7204 /* Wait for link training to complete. */
7205 for (i = 0; i < 5000; i++)
7206 udelay(100);
7207
7208 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7209 pci_write_config_dword(tp->pdev, 0xc4,
7210 cfg_val | (1 << 15));
7211 }
7212
7213 /* Clear the "no snoop" and "relaxed ordering" bits. */
7214 pci_read_config_word(tp->pdev,
7215 tp->pcie_cap + PCI_EXP_DEVCTL,
7216 &val16);
7217 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7218 PCI_EXP_DEVCTL_NOSNOOP_EN);
7219 /*
7220 * Older PCIe devices only support the 128 byte
7221 * MPS setting. Enforce the restriction.
7222 */
7223 if (!tg3_flag(tp, CPMU_PRESENT))
7224 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7225 pci_write_config_word(tp->pdev,
7226 tp->pcie_cap + PCI_EXP_DEVCTL,
7227 val16);
7228
7229 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7230
7231 /* Clear error status */
7232 pci_write_config_word(tp->pdev,
7233 tp->pcie_cap + PCI_EXP_DEVSTA,
7234 PCI_EXP_DEVSTA_CED |
7235 PCI_EXP_DEVSTA_NFED |
7236 PCI_EXP_DEVSTA_FED |
7237 PCI_EXP_DEVSTA_URD);
7238 }
7239
7240 tg3_restore_pci_state(tp);
7241
7242 tg3_flag_clear(tp, CHIP_RESETTING);
7243 tg3_flag_clear(tp, ERROR_PROCESSED);
7244
7245 val = 0;
7246 if (tg3_flag(tp, 5780_CLASS))
7247 val = tr32(MEMARB_MODE);
7248 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7249
7250 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7251 tg3_stop_fw(tp);
7252 tw32(0x5000, 0x400);
7253 }
7254
7255 tw32(GRC_MODE, tp->grc_mode);
7256
7257 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7258 val = tr32(0xc4);
7259
7260 tw32(0xc4, val | (1 << 15));
7261 }
7262
7263 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7264 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7265 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7266 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7267 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7268 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7269 }
7270
7271 if (tg3_flag(tp, ENABLE_APE))
7272 tp->mac_mode = MAC_MODE_APE_TX_EN |
7273 MAC_MODE_APE_RX_EN |
7274 MAC_MODE_TDE_ENABLE;
7275
7276 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7277 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
7278 val = tp->mac_mode;
7279 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7280 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7281 val = tp->mac_mode;
7282 } else
7283 val = 0;
7284
7285 tw32_f(MAC_MODE, val);
7286 udelay(40);
7287
7288 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7289
7290 err = tg3_poll_fw(tp);
7291 if (err)
7292 return err;
7293
7294 tg3_mdio_start(tp);
7295
7296 if (tg3_flag(tp, PCI_EXPRESS) &&
7297 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7298 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7299 !tg3_flag(tp, 57765_PLUS)) {
7300 val = tr32(0x7c00);
7301
7302 tw32(0x7c00, val | (1 << 25));
7303 }
7304
7305 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7306 val = tr32(TG3_CPMU_CLCK_ORIDE);
7307 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7308 }
7309
7310 /* Reprobe ASF enable state. */
7311 tg3_flag_clear(tp, ENABLE_ASF);
7312 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7313 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7314 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7315 u32 nic_cfg;
7316
7317 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7318 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7319 tg3_flag_set(tp, ENABLE_ASF);
7320 tp->last_event_jiffies = jiffies;
7321 if (tg3_flag(tp, 5750_PLUS))
7322 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7323 }
7324 }
7325
7326 return 0;
7327 }
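
/* In short, tg3_chip_reset() above: saves the PCI command register,
 * quiesces the irq handlers, pulses GRC_MISC_CFG_CORECLK_RESET, waits
 * for the chip to answer config cycles again, restores PCI/PCIe
 * state, waits for bootcode via tg3_poll_fw(), and re-probes the ASF
 * handshake mode from SRAM.
 */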
7328
7329 /* tp->lock is held. */
7330 static void tg3_stop_fw(struct tg3 *tp)
7331 {
7332 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
7333 /* Wait for RX cpu to ACK the previous event. */
7334 tg3_wait_for_event_ack(tp);
7335
7336 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7337
7338 tg3_generate_fw_event(tp);
7339
7340 /* Wait for RX cpu to ACK this event. */
7341 tg3_wait_for_event_ack(tp);
7342 }
7343 }
7344
7345 /* tp->lock is held. */
7346 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7347 {
7348 int err;
7349
7350 tg3_stop_fw(tp);
7351
7352 tg3_write_sig_pre_reset(tp, kind);
7353
7354 tg3_abort_hw(tp, silent);
7355 err = tg3_chip_reset(tp);
7356
7357 __tg3_set_mac_addr(tp, 0);
7358
7359 tg3_write_sig_legacy(tp, kind);
7360 tg3_write_sig_post_reset(tp, kind);
7361
7362 if (err)
7363 return err;
7364
7365 return 0;
7366 }
7367
7368 #define RX_CPU_SCRATCH_BASE 0x30000
7369 #define RX_CPU_SCRATCH_SIZE 0x04000
7370 #define TX_CPU_SCRATCH_BASE 0x34000
7371 #define TX_CPU_SCRATCH_SIZE 0x04000
7372
7373 /* tp->lock is held. */
7374 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7375 {
7376 int i;
7377
7378 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
7379
7380 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7381 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7382
7383 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7384 return 0;
7385 }
7386 if (offset == RX_CPU_BASE) {
7387 for (i = 0; i < 10000; i++) {
7388 tw32(offset + CPU_STATE, 0xffffffff);
7389 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7390 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7391 break;
7392 }
7393
7394 tw32(offset + CPU_STATE, 0xffffffff);
7395 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
7396 udelay(10);
7397 } else {
7398 for (i = 0; i < 10000; i++) {
7399 tw32(offset + CPU_STATE, 0xffffffff);
7400 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7401 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7402 break;
7403 }
7404 }
7405
7406 if (i >= 10000) {
7407 netdev_err(tp->dev, "%s timed out, %s CPU\n",
7408 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7409 return -ENODEV;
7410 }
7411
7412 /* Clear firmware's nvram arbitration. */
7413 if (tg3_flag(tp, NVRAM))
7414 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7415 return 0;
7416 }
7417
7418 struct fw_info {
7419 unsigned int fw_base;
7420 unsigned int fw_len;
7421 const __be32 *fw_data;
7422 };
7423
7424 /* tp->lock is held. */
7425 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7426 int cpu_scratch_size, struct fw_info *info)
7427 {
7428 int err, lock_err, i;
7429 void (*write_op)(struct tg3 *, u32, u32);
7430
7431 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
7432 netdev_err(tp->dev,
7433 "%s: Trying to load TX cpu firmware which is 5705\n",
7434 __func__);
7435 return -EINVAL;
7436 }
7437
7438 if (tg3_flag(tp, 5705_PLUS))
7439 write_op = tg3_write_mem;
7440 else
7441 write_op = tg3_write_indirect_reg32;
7442
7443 /* It is possible that bootcode is still loading at this point.
7444 * Get the nvram lock before halting the cpu.
7445 */
7446 lock_err = tg3_nvram_lock(tp);
7447 err = tg3_halt_cpu(tp, cpu_base);
7448 if (!lock_err)
7449 tg3_nvram_unlock(tp);
7450 if (err)
7451 goto out;
7452
7453 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7454 write_op(tp, cpu_scratch_base + i, 0);
7455 tw32(cpu_base + CPU_STATE, 0xffffffff);
7456 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
7457 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7458 write_op(tp, (cpu_scratch_base +
7459 (info->fw_base & 0xffff) +
7460 (i * sizeof(u32))),
7461 be32_to_cpu(info->fw_data[i]));
7462
7463 err = 0;
7464
7465 out:
7466 return err;
7467 }
7468
7469 /* tp->lock is held. */
7470 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7471 {
7472 struct fw_info info;
7473 const __be32 *fw_data;
7474 int err, i;
7475
7476 fw_data = (void *)tp->fw->data;
7477
7478 /* Firmware blob starts with version numbers, followed by
7479 start address and length. We are setting complete length.
7480 length = end_address_of_bss - start_address_of_text.
7481 Remainder is the blob to be loaded contiguously
7482 from start address. */
7483
7484 info.fw_base = be32_to_cpu(fw_data[1]);
7485 info.fw_len = tp->fw->size - 12;
7486 info.fw_data = &fw_data[3];
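
/* A sketch of the blob layout just parsed (hypothetical struct, for
 * illustration only; the driver indexes fw_data[] directly):
 *
 *	struct tg3_fw_blob {
 *		__be32 version;		fw_data[0]
 *		__be32 base_addr;	fw_data[1]: load/start address
 *		__be32 len;		fw_data[2] (driver recomputes it
 *					from tp->fw->size - 12 instead)
 *		__be32 image[];		fw_data[3]...: loaded contiguously
 *	};
 */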
7487
7488 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7489 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7490 &info);
7491 if (err)
7492 return err;
7493
7494 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7495 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7496 &info);
7497 if (err)
7498 return err;
7499
7500 /* Now startup only the RX cpu. */
7501 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7502 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7503
7504 for (i = 0; i < 5; i++) {
7505 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7506 break;
7507 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7508 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
7509 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7510 udelay(1000);
7511 }
7512 if (i >= 5) {
7513 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
7514 "should be %08x\n", __func__,
7515 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7516 return -ENODEV;
7517 }
7518 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7519 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
7520
7521 return 0;
7522 }
7523
7524 /* tp->lock is held. */
7525 static int tg3_load_tso_firmware(struct tg3 *tp)
7526 {
7527 struct fw_info info;
7528 const __be32 *fw_data;
7529 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7530 int err, i;
7531
7532 if (tg3_flag(tp, HW_TSO_1) ||
7533 tg3_flag(tp, HW_TSO_2) ||
7534 tg3_flag(tp, HW_TSO_3))
7535 return 0;
7536
7537 fw_data = (void *)tp->fw->data;
7538
7539 /* Firmware blob starts with version numbers, followed by
7540 start address and length. We are setting complete length.
7541 length = end_address_of_bss - start_address_of_text.
7542 Remainder is the blob to be loaded contiguously
7543 from start address. */
7544
7545 info.fw_base = be32_to_cpu(fw_data[1]);
7546 cpu_scratch_size = tp->fw_len;
7547 info.fw_len = tp->fw->size - 12;
7548 info.fw_data = &fw_data[3];
7549
7550 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7551 cpu_base = RX_CPU_BASE;
7552 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7553 } else {
7554 cpu_base = TX_CPU_BASE;
7555 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7556 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7557 }
7558
7559 err = tg3_load_firmware_cpu(tp, cpu_base,
7560 cpu_scratch_base, cpu_scratch_size,
7561 &info);
7562 if (err)
7563 return err;
7564
7565 /* Now startup the cpu. */
7566 tw32(cpu_base + CPU_STATE, 0xffffffff);
7567 tw32_f(cpu_base + CPU_PC, info.fw_base);
7568
7569 for (i = 0; i < 5; i++) {
7570 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7571 break;
7572 tw32(cpu_base + CPU_STATE, 0xffffffff);
7573 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
7574 tw32_f(cpu_base + CPU_PC, info.fw_base);
7575 udelay(1000);
7576 }
7577 if (i >= 5) {
7578 netdev_err(tp->dev,
7579 "%s fails to set CPU PC, is %08x should be %08x\n",
7580 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7581 return -ENODEV;
7582 }
7583 tw32(cpu_base + CPU_STATE, 0xffffffff);
7584 tw32_f(cpu_base + CPU_MODE, 0x00000000);
7585 return 0;
7586 }
7587
7588
7589 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7590 {
7591 struct tg3 *tp = netdev_priv(dev);
7592 struct sockaddr *addr = p;
7593 int err = 0, skip_mac_1 = 0;
7594
7595 if (!is_valid_ether_addr(addr->sa_data))
7596 return -EINVAL;
7597
7598 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7599
7600 if (!netif_running(dev))
7601 return 0;
7602
7603 if (tg3_flag(tp, ENABLE_ASF)) {
7604 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7605
7606 addr0_high = tr32(MAC_ADDR_0_HIGH);
7607 addr0_low = tr32(MAC_ADDR_0_LOW);
7608 addr1_high = tr32(MAC_ADDR_1_HIGH);
7609 addr1_low = tr32(MAC_ADDR_1_LOW);
7610
7611 /* Skip MAC addr 1 if ASF is using it. */
7612 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7613 !(addr1_high == 0 && addr1_low == 0))
7614 skip_mac_1 = 1;
7615 }
7616 spin_lock_bh(&tp->lock);
7617 __tg3_set_mac_addr(tp, skip_mac_1);
7618 spin_unlock_bh(&tp->lock);
7619
7620 return err;
7621 }
7622
7623 /* tp->lock is held. */
7624 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7625 dma_addr_t mapping, u32 maxlen_flags,
7626 u32 nic_addr)
7627 {
7628 tg3_write_mem(tp,
7629 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7630 ((u64) mapping >> 32));
7631 tg3_write_mem(tp,
7632 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7633 ((u64) mapping & 0xffffffff));
7634 tg3_write_mem(tp,
7635 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7636 maxlen_flags);
7637
7638 if (!tg3_flag(tp, 5705_PLUS))
7639 tg3_write_mem(tp,
7640 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7641 nic_addr);
7642 }
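
/* The BDINFO block written above is a tiny structure in NIC SRAM
 * (offsets here assume the usual TG3_BDINFO_* values from tg3.h):
 *
 *	+0x0	host ring DMA address, high 32 bits
 *	+0x4	host ring DMA address, low 32 bits
 *	+0x8	maxlen/flags word
 *	+0xc	NIC-side ring address (pre-5705 chips only)
 */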
7643
7644 static void __tg3_set_rx_mode(struct net_device *);
7645 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7646 {
7647 int i;
7648
7649 if (!tg3_flag(tp, ENABLE_TSS)) {
7650 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7651 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7652 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7653 } else {
7654 tw32(HOSTCC_TXCOL_TICKS, 0);
7655 tw32(HOSTCC_TXMAX_FRAMES, 0);
7656 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7657 }
7658
7659 if (!tg3_flag(tp, ENABLE_RSS)) {
7660 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7661 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7662 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7663 } else {
7664 tw32(HOSTCC_RXCOL_TICKS, 0);
7665 tw32(HOSTCC_RXMAX_FRAMES, 0);
7666 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7667 }
7668
7669 if (!tg3_flag(tp, 5705_PLUS)) {
7670 u32 val = ec->stats_block_coalesce_usecs;
7671
7672 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7673 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7674
7675 if (!netif_carrier_ok(tp->dev))
7676 val = 0;
7677
7678 tw32(HOSTCC_STAT_COAL_TICKS, val);
7679 }
7680
7681 for (i = 0; i < tp->irq_cnt - 1; i++) {
7682 u32 reg;
7683
7684 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7685 tw32(reg, ec->rx_coalesce_usecs);
7686 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7687 tw32(reg, ec->rx_max_coalesced_frames);
7688 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7689 tw32(reg, ec->rx_max_coalesced_frames_irq);
7690
7691 if (tg3_flag(tp, ENABLE_TSS)) {
7692 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7693 tw32(reg, ec->tx_coalesce_usecs);
7694 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7695 tw32(reg, ec->tx_max_coalesced_frames);
7696 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7697 tw32(reg, ec->tx_max_coalesced_frames_irq);
7698 }
7699 }
7700
7701 for (; i < tp->irq_max - 1; i++) {
7702 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7703 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7704 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7705
7706 if (tg3_flag(tp, ENABLE_TSS)) {
7707 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7708 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7709 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7710 }
7711 }
7712 }
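
/* Worked example (illustrative only): the per-vector coalescing
 * registers sit at a 0x18 byte stride, so vector 1's rx ticks live at
 * HOSTCC_RXCOL_TICKS_VEC1, vector 2's at HOSTCC_RXCOL_TICKS_VEC1 +
 * 0x18, and so on; that is what the i * 0x18 indexing above encodes.
 */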
7713
7714 /* tp->lock is held. */
7715 static void tg3_rings_reset(struct tg3 *tp)
7716 {
7717 int i;
7718 u32 stblk, txrcb, rxrcb, limit;
7719 struct tg3_napi *tnapi = &tp->napi[0];
7720
7721 /* Disable all transmit rings but the first. */
7722 if (!tg3_flag(tp, 5705_PLUS))
7723 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7724 else if (tg3_flag(tp, 5717_PLUS))
7725 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
7726 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7727 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7728 else
7729 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7730
7731 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7732 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7733 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7734 BDINFO_FLAGS_DISABLED);
7735
7736
7737 /* Disable all receive return rings but the first. */
7738 if (tg3_flag(tp, 5717_PLUS))
7739 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7740 else if (!tg3_flag(tp, 5705_PLUS))
7741 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7742 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7743 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7744 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7745 else
7746 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7747
7748 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7749 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7750 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7751 BDINFO_FLAGS_DISABLED);
7752
7753 /* Disable interrupts */
7754 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7755 tp->napi[0].chk_msi_cnt = 0;
7756 tp->napi[0].last_rx_cons = 0;
7757 tp->napi[0].last_tx_cons = 0;
7758
7759 /* Zero mailbox registers. */
7760 if (tg3_flag(tp, SUPPORT_MSIX)) {
7761 for (i = 1; i < tp->irq_max; i++) {
7762 tp->napi[i].tx_prod = 0;
7763 tp->napi[i].tx_cons = 0;
7764 if (tg3_flag(tp, ENABLE_TSS))
7765 tw32_mailbox(tp->napi[i].prodmbox, 0);
7766 tw32_rx_mbox(tp->napi[i].consmbox, 0);
7767 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7768 tp->napi[i].chk_msi_cnt = 0;
7769 tp->napi[i].last_rx_cons = 0;
7770 tp->napi[i].last_tx_cons = 0;
7771 }
7772 if (!tg3_flag(tp, ENABLE_TSS))
7773 tw32_mailbox(tp->napi[0].prodmbox, 0);
7774 } else {
7775 tp->napi[0].tx_prod = 0;
7776 tp->napi[0].tx_cons = 0;
7777 tw32_mailbox(tp->napi[0].prodmbox, 0);
7778 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7779 }
7780
7781 /* Make sure the NIC-based send BD rings are disabled. */
7782 if (!tg3_flag(tp, 5705_PLUS)) {
7783 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7784 for (i = 0; i < 16; i++)
7785 tw32_tx_mbox(mbox + i * 8, 0);
7786 }
7787
7788 txrcb = NIC_SRAM_SEND_RCB;
7789 rxrcb = NIC_SRAM_RCV_RET_RCB;
7790
7791 /* Clear status block in ram. */
7792 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7793
7794 /* Set status block DMA address */
7795 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7796 ((u64) tnapi->status_mapping >> 32));
7797 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7798 ((u64) tnapi->status_mapping & 0xffffffff));
7799
7800 if (tnapi->tx_ring) {
7801 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7802 (TG3_TX_RING_SIZE <<
7803 BDINFO_FLAGS_MAXLEN_SHIFT),
7804 NIC_SRAM_TX_BUFFER_DESC);
7805 txrcb += TG3_BDINFO_SIZE;
7806 }
7807
7808 if (tnapi->rx_rcb) {
7809 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7810 (tp->rx_ret_ring_mask + 1) <<
7811 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
7812 rxrcb += TG3_BDINFO_SIZE;
7813 }
7814
7815 stblk = HOSTCC_STATBLCK_RING1;
7816
7817 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7818 u64 mapping = (u64)tnapi->status_mapping;
7819 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7820 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7821
7822 /* Clear status block in ram. */
7823 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7824
7825 if (tnapi->tx_ring) {
7826 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7827 (TG3_TX_RING_SIZE <<
7828 BDINFO_FLAGS_MAXLEN_SHIFT),
7829 NIC_SRAM_TX_BUFFER_DESC);
7830 txrcb += TG3_BDINFO_SIZE;
7831 }
7832
7833 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7834 ((tp->rx_ret_ring_mask + 1) <<
7835 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7836
7837 stblk += 8;
7838 rxrcb += TG3_BDINFO_SIZE;
7839 }
7840 }
7841
7842 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
7843 {
7844 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
7845
7846 if (!tg3_flag(tp, 5750_PLUS) ||
7847 tg3_flag(tp, 5780_CLASS) ||
7848 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7849 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7850 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
7851 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7852 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
7853 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
7854 else
7855 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
7856
7857 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
7858 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
7859
7860 val = min(nic_rep_thresh, host_rep_thresh);
7861 tw32(RCVBDI_STD_THRESH, val);
7862
7863 if (tg3_flag(tp, 57765_PLUS))
7864 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
7865
7866 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7867 return;
7868
7869 if (!tg3_flag(tp, 5705_PLUS))
7870 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
7871 else
7872 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
7873
7874 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
7875
7876 val = min(bdcache_maxcnt / 2, host_rep_thresh);
7877 tw32(RCVBDI_JUMBO_THRESH, val);
7878
7879 if (tg3_flag(tp, 57765_PLUS))
7880 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
7881 }
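/* Worked example of the threshold math above (values hypothetical): if
 * bdcache_maxcnt were 64 and rx_pending 200, the NIC-side threshold
 * would be min(64 / 2, rx_std_max_post) and the host-side threshold
 * max(200 / 8, 1) == 25; the smaller of the two is programmed into
 * RCVBDI_STD_THRESH, so a replenish is requested before either the BD
 * cache or the host ring can run dry.
 */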
7882
7883 /* tp->lock is held. */
7884 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7885 {
7886 u32 val, rdmac_mode;
7887 int i, err, limit;
7888 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
7889
7890 tg3_disable_ints(tp);
7891
7892 tg3_stop_fw(tp);
7893
7894 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7895
7896 if (tg3_flag(tp, INIT_COMPLETE))
7897 tg3_abort_hw(tp, 1);
7898
7899 /* Enable MAC control of LPI */
7900 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
7901 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
7902 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
7903 TG3_CPMU_EEE_LNKIDL_UART_IDL);
7904
7905 tw32_f(TG3_CPMU_EEE_CTRL,
7906 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
7907
7908 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
7909 TG3_CPMU_EEEMD_LPI_IN_TX |
7910 TG3_CPMU_EEEMD_LPI_IN_RX |
7911 TG3_CPMU_EEEMD_EEE_ENABLE;
7912
7913 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
7914 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
7915
7916 if (tg3_flag(tp, ENABLE_APE))
7917 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
7918
7919 tw32_f(TG3_CPMU_EEE_MODE, val);
7920
7921 tw32_f(TG3_CPMU_EEE_DBTMR1,
7922 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
7923 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
7924
7925 tw32_f(TG3_CPMU_EEE_DBTMR2,
7926 TG3_CPMU_DBTMR2_APE_TX_2047US |
7927 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
7928 }
7929
7930 if (reset_phy)
7931 tg3_phy_reset(tp);
7932
7933 err = tg3_chip_reset(tp);
7934 if (err)
7935 return err;
7936
7937 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7938
7939 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7940 val = tr32(TG3_CPMU_CTRL);
7941 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7942 tw32(TG3_CPMU_CTRL, val);
7943
7944 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7945 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7946 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7947 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7948
7949 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7950 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7951 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7952 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7953
7954 val = tr32(TG3_CPMU_HST_ACC);
7955 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7956 val |= CPMU_HST_ACC_MACCLK_6_25;
7957 tw32(TG3_CPMU_HST_ACC, val);
7958 }
7959
7960 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7961 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
7962 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
7963 PCIE_PWR_MGMT_L1_THRESH_4MS;
7964 tw32(PCIE_PWR_MGMT_THRESH, val);
7965
7966 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
7967 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
7968
7969 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
7970
7971 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7972 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7973 }
7974
7975 if (tg3_flag(tp, L1PLLPD_EN)) {
7976 u32 grc_mode = tr32(GRC_MODE);
7977
7978 /* Access the lower 1K of PL PCIE block registers. */
7979 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7980 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7981
7982 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
7983 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
7984 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
7985
7986 tw32(GRC_MODE, grc_mode);
7987 }
7988
7989 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
7990 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7991 u32 grc_mode = tr32(GRC_MODE);
7992
7993 /* Access the lower 1K of PL PCIE block registers. */
7994 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7995 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7996
7997 val = tr32(TG3_PCIE_TLDLPL_PORT +
7998 TG3_PCIE_PL_LO_PHYCTL5);
7999 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8000 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8001
8002 tw32(GRC_MODE, grc_mode);
8003 }
8004
8005 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8006 u32 grc_mode = tr32(GRC_MODE);
8007
8008 /* Access the lower 1K of DL PCIE block registers. */
8009 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8010 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8011
8012 val = tr32(TG3_PCIE_TLDLPL_PORT +
8013 TG3_PCIE_DL_LO_FTSMAX);
8014 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8015 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8016 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8017
8018 tw32(GRC_MODE, grc_mode);
8019 }
8020
8021 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8022 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8023 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8024 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8025 }
8026
8027 /* This works around an issue with Athlon chipsets on
8028 * B3 tigon3 silicon. This bit has no effect on any
8029 * other revision. But do not set this on PCI Express
8030 * chips and don't even touch the clocks if the CPMU is present.
8031 */
8032 if (!tg3_flag(tp, CPMU_PRESENT)) {
8033 if (!tg3_flag(tp, PCI_EXPRESS))
8034 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8035 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8036 }
8037
8038 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8039 tg3_flag(tp, PCIX_MODE)) {
8040 val = tr32(TG3PCI_PCISTATE);
8041 val |= PCISTATE_RETRY_SAME_DMA;
8042 tw32(TG3PCI_PCISTATE, val);
8043 }
8044
8045 if (tg3_flag(tp, ENABLE_APE)) {
8046 /* Allow reads and writes to the
8047 * APE register and memory space.
8048 */
8049 val = tr32(TG3PCI_PCISTATE);
8050 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8051 PCISTATE_ALLOW_APE_SHMEM_WR |
8052 PCISTATE_ALLOW_APE_PSPACE_WR;
8053 tw32(TG3PCI_PCISTATE, val);
8054 }
8055
8056 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8057 /* Enable some hw fixes. */
8058 val = tr32(TG3PCI_MSI_DATA);
8059 val |= (1 << 26) | (1 << 28) | (1 << 29);
8060 tw32(TG3PCI_MSI_DATA, val);
8061 }
8062
8063 /* Descriptor ring init may make accesses to the
8064 * NIC SRAM area to setup the TX descriptors, so we
8065 * can only do this after the hardware has been
8066 * successfully reset.
8067 */
8068 err = tg3_init_rings(tp);
8069 if (err)
8070 return err;
8071
8072 if (tg3_flag(tp, 57765_PLUS)) {
8073 val = tr32(TG3PCI_DMA_RW_CTRL) &
8074 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8075 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8076 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8077 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8078 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8079 val |= DMA_RWCTRL_TAGGED_STAT_WA;
8080 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8081 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8082 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8083 /* This value is determined during the probe time DMA
8084 * engine test, tg3_test_dma.
8085 */
8086 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8087 }
8088
8089 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8090 GRC_MODE_4X_NIC_SEND_RINGS |
8091 GRC_MODE_NO_TX_PHDR_CSUM |
8092 GRC_MODE_NO_RX_PHDR_CSUM);
8093 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8094
8095 /* Pseudo-header checksum is done by hardware logic and not
8096 	 * the offload processors, so make the chip do the pseudo-
8097 * header checksums on receive. For transmit it is more
8098 * convenient to do the pseudo-header checksum in software
8099 * as Linux does that on transmit for us in all cases.
8100 */
8101 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8102
8103 tw32(GRC_MODE,
8104 tp->grc_mode |
8105 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8106
8107 	/* Set up the timer prescaler register. The clock is always 66 MHz. */
8108 val = tr32(GRC_MISC_CFG);
8109 val &= ~0xff;
8110 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8111 tw32(GRC_MISC_CFG, val);
8112
8113 /* Initialize MBUF/DESC pool. */
8114 if (tg3_flag(tp, 5750_PLUS)) {
8115 /* Do nothing. */
8116 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8117 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8118 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8119 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8120 else
8121 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8122 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8123 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8124 } else if (tg3_flag(tp, TSO_CAPABLE)) {
8125 int fw_len;
8126
8127 fw_len = tp->fw_len;
8128 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8129 tw32(BUFMGR_MB_POOL_ADDR,
8130 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8131 tw32(BUFMGR_MB_POOL_SIZE,
8132 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8133 }
8134
8135 if (tp->dev->mtu <= ETH_DATA_LEN) {
8136 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8137 tp->bufmgr_config.mbuf_read_dma_low_water);
8138 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8139 tp->bufmgr_config.mbuf_mac_rx_low_water);
8140 tw32(BUFMGR_MB_HIGH_WATER,
8141 tp->bufmgr_config.mbuf_high_water);
8142 } else {
8143 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8144 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8145 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8146 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8147 tw32(BUFMGR_MB_HIGH_WATER,
8148 tp->bufmgr_config.mbuf_high_water_jumbo);
8149 }
8150 tw32(BUFMGR_DMA_LOW_WATER,
8151 tp->bufmgr_config.dma_low_water);
8152 tw32(BUFMGR_DMA_HIGH_WATER,
8153 tp->bufmgr_config.dma_high_water);
8154
8155 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8156 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8157 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8158 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8159 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8160 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8161 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8162 tw32(BUFMGR_MODE, val);
8163 for (i = 0; i < 2000; i++) {
8164 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8165 break;
8166 udelay(10);
8167 }
8168 if (i >= 2000) {
8169 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8170 return -ENODEV;
8171 }
8172
8173 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8174 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8175
8176 tg3_setup_rxbd_thresholds(tp);
8177
8178 /* Initialize TG3_BDINFO's at:
8179 * RCVDBDI_STD_BD: standard eth size rx ring
8180 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8181 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8182 *
8183 * like so:
8184 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8185 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8186 * ring attribute flags
8187 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8188 *
8189 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8190 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8191 *
8192 * The size of each ring is fixed in the firmware, but the location is
8193 * configurable.
8194 */
8195 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8196 ((u64) tpr->rx_std_mapping >> 32));
8197 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8198 ((u64) tpr->rx_std_mapping & 0xffffffff));
8199 if (!tg3_flag(tp, 5717_PLUS))
8200 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8201 NIC_SRAM_RX_BUFFER_DESC);
8202
8203 /* Disable the mini ring */
8204 if (!tg3_flag(tp, 5705_PLUS))
8205 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8206 BDINFO_FLAGS_DISABLED);
8207
8208 /* Program the jumbo buffer descriptor ring control
8209 * blocks on those devices that have them.
8210 */
8211 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8212 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8213
8214 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8215 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8216 ((u64) tpr->rx_jmb_mapping >> 32));
8217 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8218 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8219 val = TG3_RX_JMB_RING_SIZE(tp) <<
8220 BDINFO_FLAGS_MAXLEN_SHIFT;
8221 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8222 val | BDINFO_FLAGS_USE_EXT_RECV);
8223 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8224 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8225 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8226 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8227 } else {
8228 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8229 BDINFO_FLAGS_DISABLED);
8230 }
8231
8232 if (tg3_flag(tp, 57765_PLUS)) {
8233 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8234 val = TG3_RX_STD_MAX_SIZE_5700;
8235 else
8236 val = TG3_RX_STD_MAX_SIZE_5717;
8237 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8238 val |= (TG3_RX_STD_DMA_SZ << 2);
8239 } else
8240 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8241 } else
8242 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8243
8244 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8245
8246 tpr->rx_std_prod_idx = tp->rx_pending;
8247 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8248
8249 tpr->rx_jmb_prod_idx =
8250 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8251 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8252
8253 tg3_rings_reset(tp);
8254
8255 /* Initialize MAC address and backoff seed. */
8256 __tg3_set_mac_addr(tp, 0);
8257
8258 /* MTU + ethernet header + FCS + optional VLAN tag */
8259 tw32(MAC_RX_MTU_SIZE,
8260 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8261
8262 /* The slot time is changed by tg3_setup_phy if we
8263 * run at gigabit with half duplex.
8264 */
8265 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8266 (6 << TX_LENGTHS_IPG_SHIFT) |
8267 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8268
8269 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8270 val |= tr32(MAC_TX_LENGTHS) &
8271 (TX_LENGTHS_JMB_FRM_LEN_MSK |
8272 TX_LENGTHS_CNT_DWN_VAL_MSK);
8273
8274 tw32(MAC_TX_LENGTHS, val);
8275
8276 /* Receive rules. */
8277 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8278 tw32(RCVLPC_CONFIG, 0x0181);
8279
8280 /* Calculate RDMAC_MODE setting early, we need it to determine
8281 * the RCVLPC_STATE_ENABLE mask.
8282 */
8283 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8284 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8285 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8286 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8287 RDMAC_MODE_LNGREAD_ENAB);
8288
8289 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8290 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8291
8292 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8293 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8294 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8295 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8296 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8297 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8298
8299 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8300 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8301 if (tg3_flag(tp, TSO_CAPABLE) &&
8302 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8303 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8304 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8305 !tg3_flag(tp, IS_5788)) {
8306 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8307 }
8308 }
8309
8310 if (tg3_flag(tp, PCI_EXPRESS))
8311 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8312
8313 if (tg3_flag(tp, HW_TSO_1) ||
8314 tg3_flag(tp, HW_TSO_2) ||
8315 tg3_flag(tp, HW_TSO_3))
8316 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8317
8318 if (tg3_flag(tp, 57765_PLUS) ||
8319 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8320 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8321 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8322
8323 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8324 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8325
8326 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8327 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8328 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8329 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8330 tg3_flag(tp, 57765_PLUS)) {
8331 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8332 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8333 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8334 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8335 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8336 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8337 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8338 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8339 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8340 }
8341 tw32(TG3_RDMA_RSRVCTRL_REG,
8342 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8343 }
8344
8345 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8346 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8347 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8348 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8349 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8350 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8351 }
8352
8353 /* Receive/send statistics. */
8354 if (tg3_flag(tp, 5750_PLUS)) {
8355 val = tr32(RCVLPC_STATS_ENABLE);
8356 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8357 tw32(RCVLPC_STATS_ENABLE, val);
8358 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8359 tg3_flag(tp, TSO_CAPABLE)) {
8360 val = tr32(RCVLPC_STATS_ENABLE);
8361 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8362 tw32(RCVLPC_STATS_ENABLE, val);
8363 } else {
8364 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8365 }
8366 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8367 tw32(SNDDATAI_STATSENAB, 0xffffff);
8368 tw32(SNDDATAI_STATSCTRL,
8369 (SNDDATAI_SCTRL_ENABLE |
8370 SNDDATAI_SCTRL_FASTUPD));
8371
8372 /* Setup host coalescing engine. */
8373 tw32(HOSTCC_MODE, 0);
8374 for (i = 0; i < 2000; i++) {
8375 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8376 break;
8377 udelay(10);
8378 }
8379
8380 __tg3_set_coalesce(tp, &tp->coal);
8381
8382 if (!tg3_flag(tp, 5705_PLUS)) {
8383 /* Status/statistics block address. See tg3_timer,
8384 * the tg3_periodic_fetch_stats call there, and
8385 * tg3_get_stats to see how this works for 5705/5750 chips.
8386 */
8387 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8388 ((u64) tp->stats_mapping >> 32));
8389 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8390 ((u64) tp->stats_mapping & 0xffffffff));
8391 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8392
8393 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8394
8395 /* Clear statistics and status block memory areas */
8396 for (i = NIC_SRAM_STATS_BLK;
8397 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8398 i += sizeof(u32)) {
8399 tg3_write_mem(tp, i, 0);
8400 udelay(40);
8401 }
8402 }
8403
8404 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8405
8406 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8407 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8408 if (!tg3_flag(tp, 5705_PLUS))
8409 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8410
8411 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8412 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8413 /* reset to prevent losing 1st rx packet intermittently */
8414 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8415 udelay(10);
8416 }
8417
8418 if (tg3_flag(tp, ENABLE_APE))
8419 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8420 else
8421 tp->mac_mode = 0;
8422 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8423 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
8424 if (!tg3_flag(tp, 5705_PLUS) &&
8425 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8426 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8427 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8428 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8429 udelay(40);
8430
8431 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8432 * If TG3_FLAG_IS_NIC is zero, we should read the
8433 * register to preserve the GPIO settings for LOMs. The GPIOs,
8434 * whether used as inputs or outputs, are set by boot code after
8435 * reset.
8436 */
8437 if (!tg3_flag(tp, IS_NIC)) {
8438 u32 gpio_mask;
8439
8440 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8441 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8442 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8443
8444 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8445 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8446 GRC_LCLCTRL_GPIO_OUTPUT3;
8447
8448 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8449 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8450
8451 tp->grc_local_ctrl &= ~gpio_mask;
8452 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8453
8454 /* GPIO1 must be driven high for eeprom write protect */
8455 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8456 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8457 GRC_LCLCTRL_GPIO_OUTPUT1);
8458 }
8459 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8460 udelay(100);
8461
8462 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8463 val = tr32(MSGINT_MODE);
8464 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8465 tw32(MSGINT_MODE, val);
8466 }
8467
8468 if (!tg3_flag(tp, 5705_PLUS)) {
8469 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8470 udelay(40);
8471 }
8472
8473 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8474 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8475 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8476 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8477 WDMAC_MODE_LNGREAD_ENAB);
8478
8479 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8480 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8481 if (tg3_flag(tp, TSO_CAPABLE) &&
8482 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8483 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8484 /* nothing */
8485 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8486 !tg3_flag(tp, IS_5788)) {
8487 val |= WDMAC_MODE_RX_ACCEL;
8488 }
8489 }
8490
8491 /* Enable host coalescing bug fix */
8492 if (tg3_flag(tp, 5755_PLUS))
8493 val |= WDMAC_MODE_STATUS_TAG_FIX;
8494
8495 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8496 val |= WDMAC_MODE_BURST_ALL_DATA;
8497
8498 tw32_f(WDMAC_MODE, val);
8499 udelay(40);
8500
8501 if (tg3_flag(tp, PCIX_MODE)) {
8502 u16 pcix_cmd;
8503
8504 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8505 &pcix_cmd);
8506 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8507 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8508 pcix_cmd |= PCI_X_CMD_READ_2K;
8509 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8510 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8511 pcix_cmd |= PCI_X_CMD_READ_2K;
8512 }
8513 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8514 pcix_cmd);
8515 }
8516
8517 tw32_f(RDMAC_MODE, rdmac_mode);
8518 udelay(40);
8519
8520 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8521 if (!tg3_flag(tp, 5705_PLUS))
8522 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8523
8524 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8525 tw32(SNDDATAC_MODE,
8526 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8527 else
8528 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8529
8530 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8531 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8532 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8533 if (tg3_flag(tp, LRG_PROD_RING_CAP))
8534 val |= RCVDBDI_MODE_LRG_RING_SZ;
8535 tw32(RCVDBDI_MODE, val);
8536 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8537 if (tg3_flag(tp, HW_TSO_1) ||
8538 tg3_flag(tp, HW_TSO_2) ||
8539 tg3_flag(tp, HW_TSO_3))
8540 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8541 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8542 if (tg3_flag(tp, ENABLE_TSS))
8543 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8544 tw32(SNDBDI_MODE, val);
8545 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8546
8547 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8548 err = tg3_load_5701_a0_firmware_fix(tp);
8549 if (err)
8550 return err;
8551 }
8552
8553 if (tg3_flag(tp, TSO_CAPABLE)) {
8554 err = tg3_load_tso_firmware(tp);
8555 if (err)
8556 return err;
8557 }
8558
8559 tp->tx_mode = TX_MODE_ENABLE;
8560
8561 if (tg3_flag(tp, 5755_PLUS) ||
8562 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8563 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8564
8565 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8566 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8567 tp->tx_mode &= ~val;
8568 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8569 }
8570
8571 tw32_f(MAC_TX_MODE, tp->tx_mode);
8572 udelay(100);
8573
8574 if (tg3_flag(tp, ENABLE_RSS)) {
8575 u32 reg = MAC_RSS_INDIR_TBL_0;
8576 u8 *ent = (u8 *)&val;
8577
8578 /* Setup the indirection table */
8579 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8580 int idx = i % sizeof(val);
8581
8582 ent[idx] = i % (tp->irq_cnt - 1);
8583 if (idx == sizeof(val) - 1) {
8584 tw32(reg, val);
8585 reg += 4;
8586 }
8587 }
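		/* The loop above packs the one-byte indirection entries
		 * four at a time into consecutive 32-bit
		 * MAC_RSS_INDIR_TBL registers. Each entry is an rx ring
		 * index in [0, irq_cnt - 2] (vector 0 handles only link
		 * events), so with e.g. five vectors the table simply
		 * cycles through rings 0, 1, 2, 3.
		 */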
8588
8589 /* Setup the "secret" hash key. */
8590 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8591 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8592 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8593 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8594 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8595 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8596 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8597 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8598 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8599 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8600 }
8601
8602 tp->rx_mode = RX_MODE_ENABLE;
8603 if (tg3_flag(tp, 5755_PLUS))
8604 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8605
8606 if (tg3_flag(tp, ENABLE_RSS))
8607 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8608 RX_MODE_RSS_ITBL_HASH_BITS_7 |
8609 RX_MODE_RSS_IPV6_HASH_EN |
8610 RX_MODE_RSS_TCP_IPV6_HASH_EN |
8611 RX_MODE_RSS_IPV4_HASH_EN |
8612 RX_MODE_RSS_TCP_IPV4_HASH_EN;
8613
8614 tw32_f(MAC_RX_MODE, tp->rx_mode);
8615 udelay(10);
8616
8617 tw32(MAC_LED_CTRL, tp->led_ctrl);
8618
8619 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8620 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8621 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8622 udelay(10);
8623 }
8624 tw32_f(MAC_RX_MODE, tp->rx_mode);
8625 udelay(10);
8626
8627 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8628 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8629 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8630 /* Set drive transmission level to 1.2V */
8631 /* only if the signal pre-emphasis bit is not set */
8632 val = tr32(MAC_SERDES_CFG);
8633 val &= 0xfffff000;
8634 val |= 0x880;
8635 tw32(MAC_SERDES_CFG, val);
8636 }
8637 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8638 tw32(MAC_SERDES_CFG, 0x616000);
8639 }
8640
8641 /* Prevent chip from dropping frames when flow control
8642 * is enabled.
8643 */
8644 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8645 val = 1;
8646 else
8647 val = 2;
8648 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8649
8650 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8651 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8652 /* Use hardware link auto-negotiation */
8653 tg3_flag_set(tp, HW_AUTONEG);
8654 }
8655
8656 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8657 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
8658 u32 tmp;
8659
8660 tmp = tr32(SERDES_RX_CTRL);
8661 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8662 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8663 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8664 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8665 }
8666
8667 if (!tg3_flag(tp, USE_PHYLIB)) {
8668 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8669 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8670 tp->link_config.speed = tp->link_config.orig_speed;
8671 tp->link_config.duplex = tp->link_config.orig_duplex;
8672 tp->link_config.autoneg = tp->link_config.orig_autoneg;
8673 }
8674
8675 err = tg3_setup_phy(tp, 0);
8676 if (err)
8677 return err;
8678
8679 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8680 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8681 u32 tmp;
8682
8683 /* Clear CRC stats. */
8684 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8685 tg3_writephy(tp, MII_TG3_TEST1,
8686 tmp | MII_TG3_TEST1_CRC_EN);
8687 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8688 }
8689 }
8690 }
8691
8692 __tg3_set_rx_mode(tp->dev);
8693
8694 /* Initialize receive rules. */
8695 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
8696 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8697 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
8698 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8699
8700 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
8701 limit = 8;
8702 else
8703 limit = 16;
8704 if (tg3_flag(tp, ENABLE_ASF))
8705 limit -= 4;
8706 switch (limit) {
8707 case 16:
8708 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
8709 case 15:
8710 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
8711 case 14:
8712 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
8713 case 13:
8714 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
8715 case 12:
8716 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
8717 case 11:
8718 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
8719 case 10:
8720 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
8721 case 9:
8722 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
8723 case 8:
8724 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
8725 case 7:
8726 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
8727 case 6:
8728 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
8729 case 5:
8730 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
8731 case 4:
8732 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
8733 case 3:
8734 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
8735 case 2:
8736 case 1:
8737
8738 default:
8739 break;
8740 }
8741
8742 if (tg3_flag(tp, ENABLE_APE))
8743 /* Write our heartbeat update interval to APE. */
8744 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8745 APE_HOST_HEARTBEAT_INT_DISABLE);
8746
8747 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8748
8749 return 0;
8750 }
8751
8752 /* Called at device open time to get the chip ready for
8753 * packet processing. Invoked with tp->lock held.
8754 */
8755 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8756 {
8757 tg3_switch_clocks(tp);
8758
8759 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8760
8761 return tg3_reset_hw(tp, reset_phy);
8762 }
8763
8764 #define TG3_STAT_ADD32(PSTAT, REG) \
8765 do { u32 __val = tr32(REG); \
8766 (PSTAT)->low += __val; \
8767 if ((PSTAT)->low < __val) \
8768 (PSTAT)->high += 1; \
8769 } while (0)
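/* TG3_STAT_ADD32 folds a 32-bit hardware counter sample into a 64-bit
 * software accumulator. The sample is added to the low word, and a
 * wrap of the low word (low < __val after the add) carries one into
 * the high word: e.g. low == 0xfffffff0 plus a sample of 0x20 leaves
 * low == 0x10, which is less than 0x20, so high is incremented.
 */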
8770
8771 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8772 {
8773 struct tg3_hw_stats *sp = tp->hw_stats;
8774
8775 if (!netif_carrier_ok(tp->dev))
8776 return;
8777
8778 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8779 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8780 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8781 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8782 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8783 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8784 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8785 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8786 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8787 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8788 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8789 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8790 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8791
8792 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8793 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8794 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8795 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8796 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8797 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8798 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8799 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8800 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8801 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8802 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8803 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8804 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8805 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8806
8807 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
8808 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
8809 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
8810 tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
8811 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8812 } else {
8813 u32 val = tr32(HOSTCC_FLOW_ATTN);
8814 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
8815 if (val) {
8816 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
8817 sp->rx_discards.low += val;
8818 if (sp->rx_discards.low < val)
8819 sp->rx_discards.high += 1;
8820 }
8821 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
8822 }
8823 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
8824 }
8825
8826 static void tg3_chk_missed_msi(struct tg3 *tp)
8827 {
8828 u32 i;
8829
8830 for (i = 0; i < tp->irq_cnt; i++) {
8831 struct tg3_napi *tnapi = &tp->napi[i];
8832
8833 if (tg3_has_work(tnapi)) {
8834 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
8835 tnapi->last_tx_cons == tnapi->tx_cons) {
8836 if (tnapi->chk_msi_cnt < 1) {
8837 tnapi->chk_msi_cnt++;
8838 return;
8839 }
8840 tw32_mailbox(tnapi->int_mbox,
8841 tnapi->last_tag << 24);
8842 }
8843 }
8844 tnapi->chk_msi_cnt = 0;
8845 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
8846 tnapi->last_tx_cons = tnapi->tx_cons;
8847 }
8848 }
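/* The logic above declares an interrupt "missed" when a vector has
 * work pending but neither consumer index has moved since the last
 * timer tick. One tick of grace is allowed via chk_msi_cnt; after
 * that, rewriting the interrupt mailbox with the last status tag
 * re-arms the vector so the pending status update fires again.
 */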
8849
8850 static void tg3_timer(unsigned long __opaque)
8851 {
8852 struct tg3 *tp = (struct tg3 *) __opaque;
8853
8854 if (tp->irq_sync)
8855 goto restart_timer;
8856
8857 spin_lock(&tp->lock);
8858
8859 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8860 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8861 tg3_chk_missed_msi(tp);
8862
8863 if (!tg3_flag(tp, TAGGED_STATUS)) {
8864 		/* All of this garbage is here because, with non-tagged
8865 		 * IRQ status, the mailbox/status_block protocol the chip
8866 		 * uses with the CPU is race prone.
8867 */
8868 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
8869 tw32(GRC_LOCAL_CTRL,
8870 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
8871 } else {
8872 tw32(HOSTCC_MODE, tp->coalesce_mode |
8873 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
8874 }
8875
8876 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8877 tg3_flag_set(tp, RESTART_TIMER);
8878 spin_unlock(&tp->lock);
8879 schedule_work(&tp->reset_task);
8880 return;
8881 }
8882 }
8883
8884 /* This part only runs once per second. */
8885 if (!--tp->timer_counter) {
8886 if (tg3_flag(tp, 5705_PLUS))
8887 tg3_periodic_fetch_stats(tp);
8888
8889 if (tp->setlpicnt && !--tp->setlpicnt)
8890 tg3_phy_eee_enable(tp);
8891
8892 if (tg3_flag(tp, USE_LINKCHG_REG)) {
8893 u32 mac_stat;
8894 int phy_event;
8895
8896 mac_stat = tr32(MAC_STATUS);
8897
8898 phy_event = 0;
8899 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
8900 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
8901 phy_event = 1;
8902 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
8903 phy_event = 1;
8904
8905 if (phy_event)
8906 tg3_setup_phy(tp, 0);
8907 } else if (tg3_flag(tp, POLL_SERDES)) {
8908 u32 mac_stat = tr32(MAC_STATUS);
8909 int need_setup = 0;
8910
8911 if (netif_carrier_ok(tp->dev) &&
8912 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
8913 need_setup = 1;
8914 }
8915 if (!netif_carrier_ok(tp->dev) &&
8916 (mac_stat & (MAC_STATUS_PCS_SYNCED |
8917 MAC_STATUS_SIGNAL_DET))) {
8918 need_setup = 1;
8919 }
8920 if (need_setup) {
8921 if (!tp->serdes_counter) {
8922 tw32_f(MAC_MODE,
8923 (tp->mac_mode &
8924 ~MAC_MODE_PORT_MODE_MASK));
8925 udelay(40);
8926 tw32_f(MAC_MODE, tp->mac_mode);
8927 udelay(40);
8928 }
8929 tg3_setup_phy(tp, 0);
8930 }
8931 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8932 tg3_flag(tp, 5780_CLASS)) {
8933 tg3_serdes_parallel_detect(tp);
8934 }
8935
8936 tp->timer_counter = tp->timer_multiplier;
8937 }
8938
8939 /* Heartbeat is only sent once every 2 seconds.
8940 *
8941 * The heartbeat is to tell the ASF firmware that the host
8942 * driver is still alive. In the event that the OS crashes,
8943 * ASF needs to reset the hardware to free up the FIFO space
8944 * that may be filled with rx packets destined for the host.
8945 * If the FIFO is full, ASF will no longer function properly.
8946 *
8947 	 * Unintended resets have been reported on real-time kernels
8948 	 * where the timer doesn't run on time. Netpoll has the
8949 	 * same problem.
8950 *
8951 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
8952 * to check the ring condition when the heartbeat is expiring
8953 * before doing the reset. This will prevent most unintended
8954 * resets.
8955 */
8956 if (!--tp->asf_counter) {
8957 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
8958 tg3_wait_for_event_ack(tp);
8959
8960 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
8961 FWCMD_NICDRV_ALIVE3);
8962 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
8963 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
8964 TG3_FW_UPDATE_TIMEOUT_SEC);
8965
8966 tg3_generate_fw_event(tp);
8967 }
8968 tp->asf_counter = tp->asf_multiplier;
8969 }
8970
8971 spin_unlock(&tp->lock);
8972
8973 restart_timer:
8974 tp->timer.expires = jiffies + tp->timer_offset;
8975 add_timer(&tp->timer);
8976 }
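/* Timer cadence: tg3_open arms this timer every tp->timer_offset
 * jiffies (HZ on most tagged-status hardware, HZ/10 otherwise), and
 * timer_counter / asf_counter are scaled from that so the
 * once-per-second block above really runs each second and the ASF
 * heartbeat fires every two seconds regardless of the tick rate.
 */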
8977
8978 static int tg3_request_irq(struct tg3 *tp, int irq_num)
8979 {
8980 irq_handler_t fn;
8981 unsigned long flags;
8982 char *name;
8983 struct tg3_napi *tnapi = &tp->napi[irq_num];
8984
8985 if (tp->irq_cnt == 1)
8986 name = tp->dev->name;
8987 else {
8988 name = &tnapi->irq_lbl[0];
8989 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
8990 name[IFNAMSIZ-1] = 0;
8991 }
8992
8993 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
8994 fn = tg3_msi;
8995 if (tg3_flag(tp, 1SHOT_MSI))
8996 fn = tg3_msi_1shot;
8997 flags = 0;
8998 } else {
8999 fn = tg3_interrupt;
9000 if (tg3_flag(tp, TAGGED_STATUS))
9001 fn = tg3_interrupt_tagged;
9002 flags = IRQF_SHARED;
9003 }
9004
9005 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9006 }
9007
9008 static int tg3_test_interrupt(struct tg3 *tp)
9009 {
9010 struct tg3_napi *tnapi = &tp->napi[0];
9011 struct net_device *dev = tp->dev;
9012 int err, i, intr_ok = 0;
9013 u32 val;
9014
9015 if (!netif_running(dev))
9016 return -ENODEV;
9017
9018 tg3_disable_ints(tp);
9019
9020 free_irq(tnapi->irq_vec, tnapi);
9021
9022 /*
9023 * Turn off MSI one shot mode. Otherwise this test has no
9024 * observable way to know whether the interrupt was delivered.
9025 */
9026 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9027 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9028 tw32(MSGINT_MODE, val);
9029 }
9030
9031 err = request_irq(tnapi->irq_vec, tg3_test_isr,
9032 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9033 if (err)
9034 return err;
9035
9036 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9037 tg3_enable_ints(tp);
9038
9039 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9040 tnapi->coal_now);
9041
9042 for (i = 0; i < 5; i++) {
9043 u32 int_mbox, misc_host_ctrl;
9044
9045 int_mbox = tr32_mailbox(tnapi->int_mbox);
9046 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9047
9048 if ((int_mbox != 0) ||
9049 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9050 intr_ok = 1;
9051 break;
9052 }
9053
9054 msleep(10);
9055 }
9056
9057 tg3_disable_ints(tp);
9058
9059 free_irq(tnapi->irq_vec, tnapi);
9060
9061 err = tg3_request_irq(tp, 0);
9062
9063 if (err)
9064 return err;
9065
9066 if (intr_ok) {
9067 /* Reenable MSI one shot mode. */
9068 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9069 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9070 tw32(MSGINT_MODE, val);
9071 }
9072 return 0;
9073 }
9074
9075 return -EIO;
9076 }
9077
9078 /* Returns 0 if the MSI test succeeds, or if the test fails but
9079  * INTx mode is successfully restored.
9080 */
9081 static int tg3_test_msi(struct tg3 *tp)
9082 {
9083 int err;
9084 u16 pci_cmd;
9085
9086 if (!tg3_flag(tp, USING_MSI))
9087 return 0;
9088
9089 /* Turn off SERR reporting in case MSI terminates with Master
9090 * Abort.
9091 */
9092 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9093 pci_write_config_word(tp->pdev, PCI_COMMAND,
9094 pci_cmd & ~PCI_COMMAND_SERR);
9095
9096 err = tg3_test_interrupt(tp);
9097
9098 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9099
9100 if (!err)
9101 return 0;
9102
9103 /* other failures */
9104 if (err != -EIO)
9105 return err;
9106
9107 /* MSI test failed, go back to INTx mode */
9108 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9109 "to INTx mode. Please report this failure to the PCI "
9110 "maintainer and include system chipset information\n");
9111
9112 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9113
9114 pci_disable_msi(tp->pdev);
9115
9116 tg3_flag_clear(tp, USING_MSI);
9117 tp->napi[0].irq_vec = tp->pdev->irq;
9118
9119 err = tg3_request_irq(tp, 0);
9120 if (err)
9121 return err;
9122
9123 /* Need to reset the chip because the MSI cycle may have terminated
9124 * with Master Abort.
9125 */
9126 tg3_full_lock(tp, 1);
9127
9128 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9129 err = tg3_init_hw(tp, 1);
9130
9131 tg3_full_unlock(tp);
9132
9133 if (err)
9134 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9135
9136 return err;
9137 }
9138
9139 static int tg3_request_firmware(struct tg3 *tp)
9140 {
9141 const __be32 *fw_data;
9142
9143 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9144 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9145 tp->fw_needed);
9146 return -ENOENT;
9147 }
9148
9149 fw_data = (void *)tp->fw->data;
9150
9151 /* Firmware blob starts with version numbers, followed by
9152 * start address and _full_ length including BSS sections
9153 	 * (which must be longer than the actual data, of course).
9154 */
9155
9156 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
9157 if (tp->fw_len < (tp->fw->size - 12)) {
9158 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9159 tp->fw_len, tp->fw_needed);
9160 release_firmware(tp->fw);
9161 tp->fw = NULL;
9162 return -EINVAL;
9163 }
9164
9165 /* We no longer need firmware; we have it. */
9166 tp->fw_needed = NULL;
9167 return 0;
9168 }
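/* Layout assumed by the check above: three big-endian header words
 * (version, start address, full length including BSS) followed by the
 * payload, so a well-formed file is 12 bytes of header plus at most
 * fw_len bytes of data; BSS is counted in fw_len but not stored in
 * the blob itself.
 */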
9169
9170 static bool tg3_enable_msix(struct tg3 *tp)
9171 {
9172 int i, rc, cpus = num_online_cpus();
9173 struct msix_entry msix_ent[tp->irq_max];
9174
9175 if (cpus == 1)
9176 		/* Just fall back to the simpler MSI mode. */
9177 return false;
9178
9179 /*
9180 * We want as many rx rings enabled as there are cpus.
9181 * The first MSIX vector only deals with link interrupts, etc,
9182 * so we add one to the number of vectors we are requesting.
9183 */
9184 tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
9185
9186 for (i = 0; i < tp->irq_max; i++) {
9187 msix_ent[i].entry = i;
9188 msix_ent[i].vector = 0;
9189 }
9190
9191 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9192 if (rc < 0) {
9193 return false;
9194 } else if (rc != 0) {
9195 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9196 return false;
9197 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9198 tp->irq_cnt, rc);
9199 tp->irq_cnt = rc;
9200 }
9201
9202 for (i = 0; i < tp->irq_max; i++)
9203 tp->napi[i].irq_vec = msix_ent[i].vector;
9204
9205 netif_set_real_num_tx_queues(tp->dev, 1);
9206 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9207 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9208 pci_disable_msix(tp->pdev);
9209 return false;
9210 }
9211
9212 if (tp->irq_cnt > 1) {
9213 tg3_flag_set(tp, ENABLE_RSS);
9214
9215 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9216 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9217 tg3_flag_set(tp, ENABLE_TSS);
9218 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9219 }
9220 }
9221
9222 return true;
9223 }
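/* Vector budget example for the code above: on a four-CPU system with
 * e.g. irq_max == 5, irq_cnt becomes min(4 + 1, 5) == 5 (one vector
 * for link and error events plus four rx rings), and on TSS-capable
 * parts (5719/5720) the tx queue count likewise becomes irq_cnt - 1.
 */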
9224
9225 static void tg3_ints_init(struct tg3 *tp)
9226 {
9227 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9228 !tg3_flag(tp, TAGGED_STATUS)) {
9229 /* All MSI supporting chips should support tagged
9230 * status. Assert that this is the case.
9231 */
9232 netdev_warn(tp->dev,
9233 "MSI without TAGGED_STATUS? Not using MSI\n");
9234 goto defcfg;
9235 }
9236
9237 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9238 tg3_flag_set(tp, USING_MSIX);
9239 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9240 tg3_flag_set(tp, USING_MSI);
9241
9242 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9243 u32 msi_mode = tr32(MSGINT_MODE);
9244 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9245 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9246 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9247 }
9248 defcfg:
9249 if (!tg3_flag(tp, USING_MSIX)) {
9250 tp->irq_cnt = 1;
9251 tp->napi[0].irq_vec = tp->pdev->irq;
9252 netif_set_real_num_tx_queues(tp->dev, 1);
9253 netif_set_real_num_rx_queues(tp->dev, 1);
9254 }
9255 }
9256
9257 static void tg3_ints_fini(struct tg3 *tp)
9258 {
9259 if (tg3_flag(tp, USING_MSIX))
9260 pci_disable_msix(tp->pdev);
9261 else if (tg3_flag(tp, USING_MSI))
9262 pci_disable_msi(tp->pdev);
9263 tg3_flag_clear(tp, USING_MSI);
9264 tg3_flag_clear(tp, USING_MSIX);
9265 tg3_flag_clear(tp, ENABLE_RSS);
9266 tg3_flag_clear(tp, ENABLE_TSS);
9267 }
9268
9269 static int tg3_open(struct net_device *dev)
9270 {
9271 struct tg3 *tp = netdev_priv(dev);
9272 int i, err;
9273
9274 if (tp->fw_needed) {
9275 err = tg3_request_firmware(tp);
9276 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9277 if (err)
9278 return err;
9279 } else if (err) {
9280 netdev_warn(tp->dev, "TSO capability disabled\n");
9281 tg3_flag_clear(tp, TSO_CAPABLE);
9282 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9283 netdev_notice(tp->dev, "TSO capability restored\n");
9284 tg3_flag_set(tp, TSO_CAPABLE);
9285 }
9286 }
9287
9288 netif_carrier_off(tp->dev);
9289
9290 err = tg3_power_up(tp);
9291 if (err)
9292 return err;
9293
9294 tg3_full_lock(tp, 0);
9295
9296 tg3_disable_ints(tp);
9297 tg3_flag_clear(tp, INIT_COMPLETE);
9298
9299 tg3_full_unlock(tp);
9300
9301 /*
9302 * Setup interrupts first so we know how
9303 * many NAPI resources to allocate
9304 */
9305 tg3_ints_init(tp);
9306
9307 /* The placement of this call is tied
9308 * to the setup and use of Host TX descriptors.
9309 */
9310 err = tg3_alloc_consistent(tp);
9311 if (err)
9312 goto err_out1;
9313
9314 tg3_napi_init(tp);
9315
9316 tg3_napi_enable(tp);
9317
9318 for (i = 0; i < tp->irq_cnt; i++) {
9319 struct tg3_napi *tnapi = &tp->napi[i];
9320 err = tg3_request_irq(tp, i);
9321 if (err) {
9322 			for (i--; i >= 0; i--) {
9322 				tnapi = &tp->napi[i];
9323 				free_irq(tnapi->irq_vec, tnapi);
9323 			}
9324 break;
9325 }
9326 }
9327
9328 if (err)
9329 goto err_out2;
9330
9331 tg3_full_lock(tp, 0);
9332
9333 err = tg3_init_hw(tp, 1);
9334 if (err) {
9335 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9336 tg3_free_rings(tp);
9337 } else {
9338 if (tg3_flag(tp, TAGGED_STATUS) &&
9339 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9340 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
9341 tp->timer_offset = HZ;
9342 else
9343 tp->timer_offset = HZ / 10;
9344
9345 BUG_ON(tp->timer_offset > HZ);
9346 tp->timer_counter = tp->timer_multiplier =
9347 (HZ / tp->timer_offset);
9348 tp->asf_counter = tp->asf_multiplier =
9349 ((HZ / tp->timer_offset) * 2);
9350
9351 init_timer(&tp->timer);
9352 tp->timer.expires = jiffies + tp->timer_offset;
9353 tp->timer.data = (unsigned long) tp;
9354 tp->timer.function = tg3_timer;
9355 }
9356
9357 tg3_full_unlock(tp);
9358
9359 if (err)
9360 goto err_out3;
9361
9362 if (tg3_flag(tp, USING_MSI)) {
9363 err = tg3_test_msi(tp);
9364
9365 if (err) {
9366 tg3_full_lock(tp, 0);
9367 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9368 tg3_free_rings(tp);
9369 tg3_full_unlock(tp);
9370
9371 goto err_out2;
9372 }
9373
9374 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9375 u32 val = tr32(PCIE_TRANSACTION_CFG);
9376
9377 tw32(PCIE_TRANSACTION_CFG,
9378 val | PCIE_TRANS_CFG_1SHOT_MSI);
9379 }
9380 }
9381
9382 tg3_phy_start(tp);
9383
9384 tg3_full_lock(tp, 0);
9385
9386 add_timer(&tp->timer);
9387 tg3_flag_set(tp, INIT_COMPLETE);
9388 tg3_enable_ints(tp);
9389
9390 tg3_full_unlock(tp);
9391
9392 netif_tx_start_all_queues(dev);
9393
9394 /*
9395 	 * Reset the loopback feature if it was turned on while the device
9396 	 * was down; make sure that it's installed properly now.
9397 */
9398 if (dev->features & NETIF_F_LOOPBACK)
9399 tg3_set_loopback(dev, dev->features);
9400
9401 return 0;
9402
9403 err_out3:
9404 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9405 struct tg3_napi *tnapi = &tp->napi[i];
9406 free_irq(tnapi->irq_vec, tnapi);
9407 }
9408
9409 err_out2:
9410 tg3_napi_disable(tp);
9411 tg3_napi_fini(tp);
9412 tg3_free_consistent(tp);
9413
9414 err_out1:
9415 tg3_ints_fini(tp);
9416 return err;
9417 }
9418
9419 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9420 struct rtnl_link_stats64 *);
9421 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9422
9423 static int tg3_close(struct net_device *dev)
9424 {
9425 int i;
9426 struct tg3 *tp = netdev_priv(dev);
9427
9428 tg3_napi_disable(tp);
9429 cancel_work_sync(&tp->reset_task);
9430
9431 netif_tx_stop_all_queues(dev);
9432
9433 del_timer_sync(&tp->timer);
9434
9435 tg3_phy_stop(tp);
9436
9437 tg3_full_lock(tp, 1);
9438
9439 tg3_disable_ints(tp);
9440
9441 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9442 tg3_free_rings(tp);
9443 tg3_flag_clear(tp, INIT_COMPLETE);
9444
9445 tg3_full_unlock(tp);
9446
9447 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9448 struct tg3_napi *tnapi = &tp->napi[i];
9449 free_irq(tnapi->irq_vec, tnapi);
9450 }
9451
9452 tg3_ints_fini(tp);
9453
9454 tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9455
9456 memcpy(&tp->estats_prev, tg3_get_estats(tp),
9457 sizeof(tp->estats_prev));
9458
9459 tg3_napi_fini(tp);
9460
9461 tg3_free_consistent(tp);
9462
9463 tg3_power_down(tp);
9464
9465 netif_carrier_off(tp->dev);
9466
9467 return 0;
9468 }
9469
9470 static inline u64 get_stat64(tg3_stat64_t *val)
9471 {
9472 return ((u64)val->high << 32) | ((u64)val->low);
9473 }
9474
9475 static u64 calc_crc_errors(struct tg3 *tp)
9476 {
9477 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9478
9479 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9480 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9481 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9482 u32 val;
9483
9484 spin_lock_bh(&tp->lock);
9485 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9486 tg3_writephy(tp, MII_TG3_TEST1,
9487 val | MII_TG3_TEST1_CRC_EN);
9488 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9489 } else
9490 val = 0;
9491 spin_unlock_bh(&tp->lock);
9492
9493 tp->phy_crc_errors += val;
9494
9495 return tp->phy_crc_errors;
9496 }
9497
9498 return get_stat64(&hw_stats->rx_fcs_errors);
9499 }
9500
9501 #define ESTAT_ADD(member) \
9502 estats->member = old_estats->member + \
9503 get_stat64(&hw_stats->member)
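/* For example, ESTAT_ADD(rx_octets) expands to
 *   estats->rx_octets = old_estats->rx_octets +
 *                       get_stat64(&hw_stats->rx_octets);
 * i.e. each ethtool counter is the pre-reset total saved in
 * estats_prev plus the live 64-bit value from the hardware statistics
 * block.
 */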
9504
9505 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9506 {
9507 struct tg3_ethtool_stats *estats = &tp->estats;
9508 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9509 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9510
9511 if (!hw_stats)
9512 return old_estats;
9513
9514 ESTAT_ADD(rx_octets);
9515 ESTAT_ADD(rx_fragments);
9516 ESTAT_ADD(rx_ucast_packets);
9517 ESTAT_ADD(rx_mcast_packets);
9518 ESTAT_ADD(rx_bcast_packets);
9519 ESTAT_ADD(rx_fcs_errors);
9520 ESTAT_ADD(rx_align_errors);
9521 ESTAT_ADD(rx_xon_pause_rcvd);
9522 ESTAT_ADD(rx_xoff_pause_rcvd);
9523 ESTAT_ADD(rx_mac_ctrl_rcvd);
9524 ESTAT_ADD(rx_xoff_entered);
9525 ESTAT_ADD(rx_frame_too_long_errors);
9526 ESTAT_ADD(rx_jabbers);
9527 ESTAT_ADD(rx_undersize_packets);
9528 ESTAT_ADD(rx_in_length_errors);
9529 ESTAT_ADD(rx_out_length_errors);
9530 ESTAT_ADD(rx_64_or_less_octet_packets);
9531 ESTAT_ADD(rx_65_to_127_octet_packets);
9532 ESTAT_ADD(rx_128_to_255_octet_packets);
9533 ESTAT_ADD(rx_256_to_511_octet_packets);
9534 ESTAT_ADD(rx_512_to_1023_octet_packets);
9535 ESTAT_ADD(rx_1024_to_1522_octet_packets);
9536 ESTAT_ADD(rx_1523_to_2047_octet_packets);
9537 ESTAT_ADD(rx_2048_to_4095_octet_packets);
9538 ESTAT_ADD(rx_4096_to_8191_octet_packets);
9539 ESTAT_ADD(rx_8192_to_9022_octet_packets);
9540
9541 ESTAT_ADD(tx_octets);
9542 ESTAT_ADD(tx_collisions);
9543 ESTAT_ADD(tx_xon_sent);
9544 ESTAT_ADD(tx_xoff_sent);
9545 ESTAT_ADD(tx_flow_control);
9546 ESTAT_ADD(tx_mac_errors);
9547 ESTAT_ADD(tx_single_collisions);
9548 ESTAT_ADD(tx_mult_collisions);
9549 ESTAT_ADD(tx_deferred);
9550 ESTAT_ADD(tx_excessive_collisions);
9551 ESTAT_ADD(tx_late_collisions);
9552 ESTAT_ADD(tx_collide_2times);
9553 ESTAT_ADD(tx_collide_3times);
9554 ESTAT_ADD(tx_collide_4times);
9555 ESTAT_ADD(tx_collide_5times);
9556 ESTAT_ADD(tx_collide_6times);
9557 ESTAT_ADD(tx_collide_7times);
9558 ESTAT_ADD(tx_collide_8times);
9559 ESTAT_ADD(tx_collide_9times);
9560 ESTAT_ADD(tx_collide_10times);
9561 ESTAT_ADD(tx_collide_11times);
9562 ESTAT_ADD(tx_collide_12times);
9563 ESTAT_ADD(tx_collide_13times);
9564 ESTAT_ADD(tx_collide_14times);
9565 ESTAT_ADD(tx_collide_15times);
9566 ESTAT_ADD(tx_ucast_packets);
9567 ESTAT_ADD(tx_mcast_packets);
9568 ESTAT_ADD(tx_bcast_packets);
9569 ESTAT_ADD(tx_carrier_sense_errors);
9570 ESTAT_ADD(tx_discards);
9571 ESTAT_ADD(tx_errors);
9572
9573 ESTAT_ADD(dma_writeq_full);
9574 ESTAT_ADD(dma_write_prioq_full);
9575 ESTAT_ADD(rxbds_empty);
9576 ESTAT_ADD(rx_discards);
9577 ESTAT_ADD(rx_errors);
9578 ESTAT_ADD(rx_threshold_hit);
9579
9580 ESTAT_ADD(dma_readq_full);
9581 ESTAT_ADD(dma_read_prioq_full);
9582 ESTAT_ADD(tx_comp_queue_full);
9583
9584 ESTAT_ADD(ring_set_send_prod_index);
9585 ESTAT_ADD(ring_status_update);
9586 ESTAT_ADD(nic_irqs);
9587 ESTAT_ADD(nic_avoided_irqs);
9588 ESTAT_ADD(nic_tx_threshold_hit);
9589
9590 ESTAT_ADD(mbuf_lwm_thresh_hit);
9591
9592 return estats;
9593 }
9594
9595 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9596 struct rtnl_link_stats64 *stats)
9597 {
9598 struct tg3 *tp = netdev_priv(dev);
9599 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9600 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9601
9602 if (!hw_stats)
9603 return old_stats;
9604
9605 stats->rx_packets = old_stats->rx_packets +
9606 get_stat64(&hw_stats->rx_ucast_packets) +
9607 get_stat64(&hw_stats->rx_mcast_packets) +
9608 get_stat64(&hw_stats->rx_bcast_packets);
9609
9610 stats->tx_packets = old_stats->tx_packets +
9611 get_stat64(&hw_stats->tx_ucast_packets) +
9612 get_stat64(&hw_stats->tx_mcast_packets) +
9613 get_stat64(&hw_stats->tx_bcast_packets);
9614
9615 stats->rx_bytes = old_stats->rx_bytes +
9616 get_stat64(&hw_stats->rx_octets);
9617 stats->tx_bytes = old_stats->tx_bytes +
9618 get_stat64(&hw_stats->tx_octets);
9619
9620 stats->rx_errors = old_stats->rx_errors +
9621 get_stat64(&hw_stats->rx_errors);
9622 stats->tx_errors = old_stats->tx_errors +
9623 get_stat64(&hw_stats->tx_errors) +
9624 get_stat64(&hw_stats->tx_mac_errors) +
9625 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9626 get_stat64(&hw_stats->tx_discards);
9627
9628 stats->multicast = old_stats->multicast +
9629 get_stat64(&hw_stats->rx_mcast_packets);
9630 stats->collisions = old_stats->collisions +
9631 get_stat64(&hw_stats->tx_collisions);
9632
9633 stats->rx_length_errors = old_stats->rx_length_errors +
9634 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9635 get_stat64(&hw_stats->rx_undersize_packets);
9636
9637 stats->rx_over_errors = old_stats->rx_over_errors +
9638 get_stat64(&hw_stats->rxbds_empty);
9639 stats->rx_frame_errors = old_stats->rx_frame_errors +
9640 get_stat64(&hw_stats->rx_align_errors);
9641 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9642 get_stat64(&hw_stats->tx_discards);
9643 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9644 get_stat64(&hw_stats->tx_carrier_sense_errors);
9645
9646 stats->rx_crc_errors = old_stats->rx_crc_errors +
9647 calc_crc_errors(tp);
9648
9649 stats->rx_missed_errors = old_stats->rx_missed_errors +
9650 get_stat64(&hw_stats->rx_discards);
9651
9652 stats->rx_dropped = tp->rx_dropped;
9653
9654 return stats;
9655 }
9656
9657 static inline u32 calc_crc(unsigned char *buf, int len)
9658 {
9659 u32 reg;
9660 u32 tmp;
9661 int j, k;
9662
9663 reg = 0xffffffff;
9664
9665 for (j = 0; j < len; j++) {
9666 reg ^= buf[j];
9667
9668 for (k = 0; k < 8; k++) {
9669 tmp = reg & 0x01;
9670
9671 reg >>= 1;
9672
9673 if (tmp)
9674 reg ^= 0xedb88320;
9675 }
9676 }
9677
9678 return ~reg;
9679 }
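/* calc_crc() above is the classic bit-at-a-time, LSB-first CRC-32
 * (reflected polynomial 0xedb88320, the same CRC used for the
 * Ethernet FCS), returned complemented.  The multicast hash filter
 * below and the NVRAM self test both key off this value.
 */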
9680
9681 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9682 {
9683 /* accept or reject all multicast frames */
9684 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9685 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9686 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9687 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9688 }
9689
9690 static void __tg3_set_rx_mode(struct net_device *dev)
9691 {
9692 struct tg3 *tp = netdev_priv(dev);
9693 u32 rx_mode;
9694
9695 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9696 RX_MODE_KEEP_VLAN_TAG);
9697
9698 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9699 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9700 * flag clear.
9701 */
9702 if (!tg3_flag(tp, ENABLE_ASF))
9703 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9704 #endif
9705
9706 if (dev->flags & IFF_PROMISC) {
9707 /* Promiscuous mode. */
9708 rx_mode |= RX_MODE_PROMISC;
9709 } else if (dev->flags & IFF_ALLMULTI) {
9710 /* Accept all multicast. */
9711 tg3_set_multi(tp, 1);
9712 } else if (netdev_mc_empty(dev)) {
9713 /* Reject all multicast. */
9714 tg3_set_multi(tp, 0);
9715 } else {
9716 /* Accept one or more multicast addresses. */
9717 struct netdev_hw_addr *ha;
9718 u32 mc_filter[4] = { 0, };
9719 u32 regidx;
9720 u32 bit;
9721 u32 crc;
9722
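/* 128-bit multicast hash filter: the complement of the CRC's low
 * 7 bits selects one of 128 filter bits; bits 6:5 pick one of the
 * four 32-bit MAC_HASH registers, bits 4:0 the bit within it.
 */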
9723 netdev_for_each_mc_addr(ha, dev) {
9724 crc = calc_crc(ha->addr, ETH_ALEN);
9725 bit = ~crc & 0x7f;
9726 regidx = (bit & 0x60) >> 5;
9727 bit &= 0x1f;
9728 mc_filter[regidx] |= (1 << bit);
9729 }
9730
9731 tw32(MAC_HASH_REG_0, mc_filter[0]);
9732 tw32(MAC_HASH_REG_1, mc_filter[1]);
9733 tw32(MAC_HASH_REG_2, mc_filter[2]);
9734 tw32(MAC_HASH_REG_3, mc_filter[3]);
9735 }
9736
9737 if (rx_mode != tp->rx_mode) {
9738 tp->rx_mode = rx_mode;
9739 tw32_f(MAC_RX_MODE, rx_mode);
9740 udelay(10);
9741 }
9742 }
9743
9744 static void tg3_set_rx_mode(struct net_device *dev)
9745 {
9746 struct tg3 *tp = netdev_priv(dev);
9747
9748 if (!netif_running(dev))
9749 return;
9750
9751 tg3_full_lock(tp, 0);
9752 __tg3_set_rx_mode(dev);
9753 tg3_full_unlock(tp);
9754 }
9755
9756 static int tg3_get_regs_len(struct net_device *dev)
9757 {
9758 return TG3_REG_BLK_SIZE;
9759 }
9760
9761 static void tg3_get_regs(struct net_device *dev,
9762 struct ethtool_regs *regs, void *_p)
9763 {
9764 struct tg3 *tp = netdev_priv(dev);
9765
9766 regs->version = 0;
9767
9768 memset(_p, 0, TG3_REG_BLK_SIZE);
9769
9770 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9771 return;
9772
9773 tg3_full_lock(tp, 0);
9774
9775 tg3_dump_legacy_regs(tp, (u32 *)_p);
9776
9777 tg3_full_unlock(tp);
9778 }
9779
9780 static int tg3_get_eeprom_len(struct net_device *dev)
9781 {
9782 struct tg3 *tp = netdev_priv(dev);
9783
9784 return tp->nvram_size;
9785 }
9786
9787 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9788 {
9789 struct tg3 *tp = netdev_priv(dev);
9790 int ret;
9791 u8 *pd;
9792 u32 i, offset, len, b_offset, b_count;
9793 __be32 val;
9794
9795 if (tg3_flag(tp, NO_NVRAM))
9796 return -EINVAL;
9797
9798 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9799 return -EAGAIN;
9800
9801 offset = eeprom->offset;
9802 len = eeprom->len;
9803 eeprom->len = 0;
9804
9805 eeprom->magic = TG3_EEPROM_MAGIC;
9806
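/* NVRAM is read one 32-bit word at a time, so an arbitrary
 * (offset, len) request is split into an unaligned head, a run of
 * whole words, and an unaligned tail.  E.g. offset=5, len=10 copies
 * 3 bytes from the word at 4, the whole word at 8, and 3 bytes from
 * the word at 12.
 */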
9807 if (offset & 3) {
9808 /* adjustments to start on required 4 byte boundary */
9809 b_offset = offset & 3;
9810 b_count = 4 - b_offset;
9811 if (b_count > len) {
9812 /* i.e. offset=1 len=2 */
9813 b_count = len;
9814 }
9815 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9816 if (ret)
9817 return ret;
9818 memcpy(data, ((char *)&val) + b_offset, b_count);
9819 len -= b_count;
9820 offset += b_count;
9821 eeprom->len += b_count;
9822 }
9823
9824 /* read bytes up to the last 4 byte boundary */
9825 pd = &data[eeprom->len];
9826 for (i = 0; i < (len - (len & 3)); i += 4) {
9827 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9828 if (ret) {
9829 eeprom->len += i;
9830 return ret;
9831 }
9832 memcpy(pd + i, &val, 4);
9833 }
9834 eeprom->len += i;
9835
9836 if (len & 3) {
9837 /* read last bytes not ending on 4 byte boundary */
9838 pd = &data[eeprom->len];
9839 b_count = len & 3;
9840 b_offset = offset + len - b_count;
9841 ret = tg3_nvram_read_be32(tp, b_offset, &val);
9842 if (ret)
9843 return ret;
9844 memcpy(pd, &val, b_count);
9845 eeprom->len += b_count;
9846 }
9847 return 0;
9848 }
9849
9850 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9851
9852 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9853 {
9854 struct tg3 *tp = netdev_priv(dev);
9855 int ret;
9856 u32 offset, len, b_offset, odd_len;
9857 u8 *buf;
9858 __be32 start, end;
9859
9860 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9861 return -EAGAIN;
9862
9863 if (tg3_flag(tp, NO_NVRAM) ||
9864 eeprom->magic != TG3_EEPROM_MAGIC)
9865 return -EINVAL;
9866
9867 offset = eeprom->offset;
9868 len = eeprom->len;
9869
9870 if ((b_offset = (offset & 3))) {
9871 /* adjustments to start on required 4 byte boundary */
9872 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
9873 if (ret)
9874 return ret;
9875 len += b_offset;
9876 offset &= ~3;
9877 if (len < 4)
9878 len = 4;
9879 }
9880
9881 odd_len = 0;
9882 if (len & 3) {
9883 /* adjustments to end on required 4 byte boundary */
9884 odd_len = 1;
9885 len = (len + 3) & ~3;
9886 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
9887 if (ret)
9888 return ret;
9889 }
9890
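/* If either end of the request is unaligned, build a word-aligned
 * image in a bounce buffer; the boundary words read back above
 * supply the neighboring bytes so the flash write does not clobber
 * them.
 */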
9891 buf = data;
9892 if (b_offset || odd_len) {
9893 buf = kmalloc(len, GFP_KERNEL);
9894 if (!buf)
9895 return -ENOMEM;
9896 if (b_offset)
9897 memcpy(buf, &start, 4);
9898 if (odd_len)
9899 memcpy(buf+len-4, &end, 4);
9900 memcpy(buf + b_offset, data, eeprom->len);
9901 }
9902
9903 ret = tg3_nvram_write_block(tp, offset, len, buf);
9904
9905 if (buf != data)
9906 kfree(buf);
9907
9908 return ret;
9909 }
9910
9911 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9912 {
9913 struct tg3 *tp = netdev_priv(dev);
9914
9915 if (tg3_flag(tp, USE_PHYLIB)) {
9916 struct phy_device *phydev;
9917 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9918 return -EAGAIN;
9919 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9920 return phy_ethtool_gset(phydev, cmd);
9921 }
9922
9923 cmd->supported = (SUPPORTED_Autoneg);
9924
9925 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9926 cmd->supported |= (SUPPORTED_1000baseT_Half |
9927 SUPPORTED_1000baseT_Full);
9928
9929 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
9930 cmd->supported |= (SUPPORTED_100baseT_Half |
9931 SUPPORTED_100baseT_Full |
9932 SUPPORTED_10baseT_Half |
9933 SUPPORTED_10baseT_Full |
9934 SUPPORTED_TP);
9935 cmd->port = PORT_TP;
9936 } else {
9937 cmd->supported |= SUPPORTED_FIBRE;
9938 cmd->port = PORT_FIBRE;
9939 }
9940
9941 cmd->advertising = tp->link_config.advertising;
9942 if (netif_running(dev)) {
9943 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
9944 cmd->duplex = tp->link_config.active_duplex;
9945 } else {
9946 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
9947 cmd->duplex = DUPLEX_INVALID;
9948 }
9949 cmd->phy_address = tp->phy_addr;
9950 cmd->transceiver = XCVR_INTERNAL;
9951 cmd->autoneg = tp->link_config.autoneg;
9952 cmd->maxtxpkt = 0;
9953 cmd->maxrxpkt = 0;
9954 return 0;
9955 }
9956
9957 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9958 {
9959 struct tg3 *tp = netdev_priv(dev);
9960 u32 speed = ethtool_cmd_speed(cmd);
9961
9962 if (tg3_flag(tp, USE_PHYLIB)) {
9963 struct phy_device *phydev;
9964 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9965 return -EAGAIN;
9966 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9967 return phy_ethtool_sset(phydev, cmd);
9968 }
9969
9970 if (cmd->autoneg != AUTONEG_ENABLE &&
9971 cmd->autoneg != AUTONEG_DISABLE)
9972 return -EINVAL;
9973
9974 if (cmd->autoneg == AUTONEG_DISABLE &&
9975 cmd->duplex != DUPLEX_FULL &&
9976 cmd->duplex != DUPLEX_HALF)
9977 return -EINVAL;
9978
9979 if (cmd->autoneg == AUTONEG_ENABLE) {
9980 u32 mask = ADVERTISED_Autoneg |
9981 ADVERTISED_Pause |
9982 ADVERTISED_Asym_Pause;
9983
9984 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9985 mask |= ADVERTISED_1000baseT_Half |
9986 ADVERTISED_1000baseT_Full;
9987
9988 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
9989 mask |= ADVERTISED_100baseT_Half |
9990 ADVERTISED_100baseT_Full |
9991 ADVERTISED_10baseT_Half |
9992 ADVERTISED_10baseT_Full |
9993 ADVERTISED_TP;
9994 else
9995 mask |= ADVERTISED_FIBRE;
9996
9997 if (cmd->advertising & ~mask)
9998 return -EINVAL;
9999
10000 mask &= (ADVERTISED_1000baseT_Half |
10001 ADVERTISED_1000baseT_Full |
10002 ADVERTISED_100baseT_Half |
10003 ADVERTISED_100baseT_Full |
10004 ADVERTISED_10baseT_Half |
10005 ADVERTISED_10baseT_Full);
10006
10007 cmd->advertising &= mask;
10008 } else {
10009 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10010 if (speed != SPEED_1000)
10011 return -EINVAL;
10012
10013 if (cmd->duplex != DUPLEX_FULL)
10014 return -EINVAL;
10015 } else {
10016 if (speed != SPEED_100 &&
10017 speed != SPEED_10)
10018 return -EINVAL;
10019 }
10020 }
10021
10022 tg3_full_lock(tp, 0);
10023
10024 tp->link_config.autoneg = cmd->autoneg;
10025 if (cmd->autoneg == AUTONEG_ENABLE) {
10026 tp->link_config.advertising = (cmd->advertising |
10027 ADVERTISED_Autoneg);
10028 tp->link_config.speed = SPEED_INVALID;
10029 tp->link_config.duplex = DUPLEX_INVALID;
10030 } else {
10031 tp->link_config.advertising = 0;
10032 tp->link_config.speed = speed;
10033 tp->link_config.duplex = cmd->duplex;
10034 }
10035
10036 tp->link_config.orig_speed = tp->link_config.speed;
10037 tp->link_config.orig_duplex = tp->link_config.duplex;
10038 tp->link_config.orig_autoneg = tp->link_config.autoneg;
10039
10040 if (netif_running(dev))
10041 tg3_setup_phy(tp, 1);
10042
10043 tg3_full_unlock(tp);
10044
10045 return 0;
10046 }
10047
10048 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10049 {
10050 struct tg3 *tp = netdev_priv(dev);
10051
10052 strcpy(info->driver, DRV_MODULE_NAME);
10053 strcpy(info->version, DRV_MODULE_VERSION);
10054 strcpy(info->fw_version, tp->fw_ver);
10055 strcpy(info->bus_info, pci_name(tp->pdev));
10056 }
10057
10058 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10059 {
10060 struct tg3 *tp = netdev_priv(dev);
10061
10062 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10063 wol->supported = WAKE_MAGIC;
10064 else
10065 wol->supported = 0;
10066 wol->wolopts = 0;
10067 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10068 wol->wolopts = WAKE_MAGIC;
10069 memset(&wol->sopass, 0, sizeof(wol->sopass));
10070 }
10071
10072 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10073 {
10074 struct tg3 *tp = netdev_priv(dev);
10075 struct device *dp = &tp->pdev->dev;
10076
10077 if (wol->wolopts & ~WAKE_MAGIC)
10078 return -EINVAL;
10079 if ((wol->wolopts & WAKE_MAGIC) &&
10080 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10081 return -EINVAL;
10082
10083 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10084
10085 spin_lock_bh(&tp->lock);
10086 if (device_may_wakeup(dp))
10087 tg3_flag_set(tp, WOL_ENABLE);
10088 else
10089 tg3_flag_clear(tp, WOL_ENABLE);
10090 spin_unlock_bh(&tp->lock);
10091
10092 return 0;
10093 }
10094
10095 static u32 tg3_get_msglevel(struct net_device *dev)
10096 {
10097 struct tg3 *tp = netdev_priv(dev);
10098 return tp->msg_enable;
10099 }
10100
10101 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10102 {
10103 struct tg3 *tp = netdev_priv(dev);
10104 tp->msg_enable = value;
10105 }
10106
10107 static int tg3_nway_reset(struct net_device *dev)
10108 {
10109 struct tg3 *tp = netdev_priv(dev);
10110 int r;
10111
10112 if (!netif_running(dev))
10113 return -EAGAIN;
10114
10115 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10116 return -EINVAL;
10117
10118 if (tg3_flag(tp, USE_PHYLIB)) {
10119 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10120 return -EAGAIN;
10121 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10122 } else {
10123 u32 bmcr;
10124
10125 spin_lock_bh(&tp->lock);
10126 r = -EINVAL;
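/* BMCR is read twice below and only the second, checked read is
 * used; the discarded first read appears to be a dummy access to
 * settle the MII interface.
 */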
10127 tg3_readphy(tp, MII_BMCR, &bmcr);
10128 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10129 ((bmcr & BMCR_ANENABLE) ||
10130 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10131 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10132 BMCR_ANENABLE);
10133 r = 0;
10134 }
10135 spin_unlock_bh(&tp->lock);
10136 }
10137
10138 return r;
10139 }
10140
10141 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10142 {
10143 struct tg3 *tp = netdev_priv(dev);
10144
10145 ering->rx_max_pending = tp->rx_std_ring_mask;
10146 ering->rx_mini_max_pending = 0;
10147 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10148 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10149 else
10150 ering->rx_jumbo_max_pending = 0;
10151
10152 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10153
10154 ering->rx_pending = tp->rx_pending;
10155 ering->rx_mini_pending = 0;
10156 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10157 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10158 else
10159 ering->rx_jumbo_pending = 0;
10160
10161 ering->tx_pending = tp->napi[0].tx_pending;
10162 }
10163
10164 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10165 {
10166 struct tg3 *tp = netdev_priv(dev);
10167 int i, irq_sync = 0, err = 0;
10168
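/* The tx ring must hold more descriptors than a maximally
 * fragmented skb needs (MAX_SKB_FRAGS frags plus linear data);
 * chips with the TSO bug keep a 3x margin for the segmentation
 * workaround path.
 */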
10169 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10170 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10171 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10172 (ering->tx_pending <= MAX_SKB_FRAGS) ||
10173 (tg3_flag(tp, TSO_BUG) &&
10174 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10175 return -EINVAL;
10176
10177 if (netif_running(dev)) {
10178 tg3_phy_stop(tp);
10179 tg3_netif_stop(tp);
10180 irq_sync = 1;
10181 }
10182
10183 tg3_full_lock(tp, irq_sync);
10184
10185 tp->rx_pending = ering->rx_pending;
10186
10187 if (tg3_flag(tp, MAX_RXPEND_64) &&
10188 tp->rx_pending > 63)
10189 tp->rx_pending = 63;
10190 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10191
10192 for (i = 0; i < tp->irq_max; i++)
10193 tp->napi[i].tx_pending = ering->tx_pending;
10194
10195 if (netif_running(dev)) {
10196 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10197 err = tg3_restart_hw(tp, 1);
10198 if (!err)
10199 tg3_netif_start(tp);
10200 }
10201
10202 tg3_full_unlock(tp);
10203
10204 if (irq_sync && !err)
10205 tg3_phy_start(tp);
10206
10207 return err;
10208 }
10209
10210 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10211 {
10212 struct tg3 *tp = netdev_priv(dev);
10213
10214 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10215
10216 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10217 epause->rx_pause = 1;
10218 else
10219 epause->rx_pause = 0;
10220
10221 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10222 epause->tx_pause = 1;
10223 else
10224 epause->tx_pause = 0;
10225 }
10226
10227 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10228 {
10229 struct tg3 *tp = netdev_priv(dev);
10230 int err = 0;
10231
10232 if (tg3_flag(tp, USE_PHYLIB)) {
10233 u32 newadv;
10234 struct phy_device *phydev;
10235
10236 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10237
10238 if (!(phydev->supported & SUPPORTED_Pause) ||
10239 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10240 (epause->rx_pause != epause->tx_pause)))
10241 return -EINVAL;
10242
10243 tp->link_config.flowctrl = 0;
10244 if (epause->rx_pause) {
10245 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10246
10247 if (epause->tx_pause) {
10248 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10249 newadv = ADVERTISED_Pause;
10250 } else
10251 newadv = ADVERTISED_Pause |
10252 ADVERTISED_Asym_Pause;
10253 } else if (epause->tx_pause) {
10254 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10255 newadv = ADVERTISED_Asym_Pause;
10256 } else
10257 newadv = 0;
10258
10259 if (epause->autoneg)
10260 tg3_flag_set(tp, PAUSE_AUTONEG);
10261 else
10262 tg3_flag_clear(tp, PAUSE_AUTONEG);
10263
10264 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10265 u32 oldadv = phydev->advertising &
10266 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10267 if (oldadv != newadv) {
10268 phydev->advertising &=
10269 ~(ADVERTISED_Pause |
10270 ADVERTISED_Asym_Pause);
10271 phydev->advertising |= newadv;
10272 if (phydev->autoneg) {
10273 /*
10274 * Always renegotiate the link to
10275 * inform our link partner of our
10276 * flow control settings, even if the
10277 * flow control is forced. Let
10278 * tg3_adjust_link() do the final
10279 * flow control setup.
10280 */
10281 return phy_start_aneg(phydev);
10282 }
10283 }
10284
10285 if (!epause->autoneg)
10286 tg3_setup_flow_control(tp, 0, 0);
10287 } else {
10288 tp->link_config.orig_advertising &=
10289 ~(ADVERTISED_Pause |
10290 ADVERTISED_Asym_Pause);
10291 tp->link_config.orig_advertising |= newadv;
10292 }
10293 } else {
10294 int irq_sync = 0;
10295
10296 if (netif_running(dev)) {
10297 tg3_netif_stop(tp);
10298 irq_sync = 1;
10299 }
10300
10301 tg3_full_lock(tp, irq_sync);
10302
10303 if (epause->autoneg)
10304 tg3_flag_set(tp, PAUSE_AUTONEG);
10305 else
10306 tg3_flag_clear(tp, PAUSE_AUTONEG);
10307 if (epause->rx_pause)
10308 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10309 else
10310 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10311 if (epause->tx_pause)
10312 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10313 else
10314 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10315
10316 if (netif_running(dev)) {
10317 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10318 err = tg3_restart_hw(tp, 1);
10319 if (!err)
10320 tg3_netif_start(tp);
10321 }
10322
10323 tg3_full_unlock(tp);
10324 }
10325
10326 return err;
10327 }
10328
10329 static int tg3_get_sset_count(struct net_device *dev, int sset)
10330 {
10331 switch (sset) {
10332 case ETH_SS_TEST:
10333 return TG3_NUM_TEST;
10334 case ETH_SS_STATS:
10335 return TG3_NUM_STATS;
10336 default:
10337 return -EOPNOTSUPP;
10338 }
10339 }
10340
10341 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10342 {
10343 switch (stringset) {
10344 case ETH_SS_STATS:
10345 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10346 break;
10347 case ETH_SS_TEST:
10348 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10349 break;
10350 default:
10351 WARN_ON(1); /* unknown string set: should never happen */
10352 break;
10353 }
10354 }
10355
10356 static int tg3_set_phys_id(struct net_device *dev,
10357 enum ethtool_phys_id_state state)
10358 {
10359 struct tg3 *tp = netdev_priv(dev);
10360
10361 if (!netif_running(tp->dev))
10362 return -EAGAIN;
10363
10364 switch (state) {
10365 case ETHTOOL_ID_ACTIVE:
10366 return 1; /* cycle on/off once per second */
10367
10368 case ETHTOOL_ID_ON:
10369 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10370 LED_CTRL_1000MBPS_ON |
10371 LED_CTRL_100MBPS_ON |
10372 LED_CTRL_10MBPS_ON |
10373 LED_CTRL_TRAFFIC_OVERRIDE |
10374 LED_CTRL_TRAFFIC_BLINK |
10375 LED_CTRL_TRAFFIC_LED);
10376 break;
10377
10378 case ETHTOOL_ID_OFF:
10379 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10380 LED_CTRL_TRAFFIC_OVERRIDE);
10381 break;
10382
10383 case ETHTOOL_ID_INACTIVE:
10384 tw32(MAC_LED_CTRL, tp->led_ctrl);
10385 break;
10386 }
10387
10388 return 0;
10389 }
10390
10391 static void tg3_get_ethtool_stats(struct net_device *dev,
10392 struct ethtool_stats *estats, u64 *tmp_stats)
10393 {
10394 struct tg3 *tp = netdev_priv(dev);
10395 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10396 }
10397
10398 static __be32 * tg3_vpd_readblock(struct tg3 *tp)
10399 {
10400 int i;
10401 __be32 *buf;
10402 u32 offset = 0, len = 0;
10403 u32 magic, val;
10404
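/* Locate the VPD image.  EEPROM-style parts may publish an
 * extended-VPD block through an NVRAM directory entry, with a fixed
 * legacy offset as the fallback; other parts are read through the
 * PCI VPD capability further below.
 */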
10405 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10406 return NULL;
10407
10408 if (magic == TG3_EEPROM_MAGIC) {
10409 for (offset = TG3_NVM_DIR_START;
10410 offset < TG3_NVM_DIR_END;
10411 offset += TG3_NVM_DIRENT_SIZE) {
10412 if (tg3_nvram_read(tp, offset, &val))
10413 return NULL;
10414
10415 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10416 TG3_NVM_DIRTYPE_EXTVPD)
10417 break;
10418 }
10419
10420 if (offset != TG3_NVM_DIR_END) {
10421 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10422 if (tg3_nvram_read(tp, offset + 4, &offset))
10423 return NULL;
10424
10425 offset = tg3_nvram_logical_addr(tp, offset);
10426 }
10427 }
10428
10429 if (!offset || !len) {
10430 offset = TG3_NVM_VPD_OFF;
10431 len = TG3_NVM_VPD_LEN;
10432 }
10433
10434 buf = kmalloc(len, GFP_KERNEL);
10435 if (buf == NULL)
10436 return NULL;
10437
10438 if (magic == TG3_EEPROM_MAGIC) {
10439 for (i = 0; i < len; i += 4) {
10440 /* The data is in little-endian format in NVRAM.
10441 * Use the big-endian read routines to preserve
10442 * the byte order as it exists in NVRAM.
10443 */
10444 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10445 goto error;
10446 }
10447 } else {
10448 u8 *ptr;
10449 ssize_t cnt;
10450 unsigned int pos = 0;
10451
10452 ptr = (u8 *)&buf[0];
10453 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10454 cnt = pci_read_vpd(tp->pdev, pos,
10455 len - pos, ptr);
10456 if (cnt == -ETIMEDOUT || cnt == -EINTR)
10457 cnt = 0;
10458 else if (cnt < 0)
10459 goto error;
10460 }
10461 if (pos != len)
10462 goto error;
10463 }
10464
10465 return buf;
10466
10467 error:
10468 kfree(buf);
10469 return NULL;
10470 }
10471
10472 #define NVRAM_TEST_SIZE 0x100
10473 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10474 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10475 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10476 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10477 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10478
10479 static int tg3_test_nvram(struct tg3 *tp)
10480 {
10481 u32 csum, magic;
10482 __be32 *buf;
10483 int i, j, k, err = 0, size;
10484
10485 if (tg3_flag(tp, NO_NVRAM))
10486 return 0;
10487
10488 if (tg3_nvram_read(tp, 0, &magic) != 0)
10489 return -EIO;
10490
10491 if (magic == TG3_EEPROM_MAGIC)
10492 size = NVRAM_TEST_SIZE;
10493 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10494 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10495 TG3_EEPROM_SB_FORMAT_1) {
10496 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10497 case TG3_EEPROM_SB_REVISION_0:
10498 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10499 break;
10500 case TG3_EEPROM_SB_REVISION_2:
10501 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10502 break;
10503 case TG3_EEPROM_SB_REVISION_3:
10504 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10505 break;
10506 default:
10507 return 0;
10508 }
10509 } else
10510 return 0;
10511 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10512 size = NVRAM_SELFBOOT_HW_SIZE;
10513 else
10514 return -EIO;
10515
10516 buf = kmalloc(size, GFP_KERNEL);
10517 if (buf == NULL)
10518 return -ENOMEM;
10519
10520 err = -EIO;
10521 for (i = 0, j = 0; i < size; i += 4, j++) {
10522 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10523 if (err)
10524 break;
10525 }
10526 if (i < size)
10527 goto out;
10528
10529 /* Selfboot format */
10530 magic = be32_to_cpu(buf[0]);
10531 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10532 TG3_EEPROM_MAGIC_FW) {
10533 u8 *buf8 = (u8 *) buf, csum8 = 0;
10534
10535 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10536 TG3_EEPROM_SB_REVISION_2) {
10537 /* For rev 2, the csum doesn't include the MBA. */
10538 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10539 csum8 += buf8[i];
10540 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10541 csum8 += buf8[i];
10542 } else {
10543 for (i = 0; i < size; i++)
10544 csum8 += buf8[i];
10545 }
10546
10547 if (csum8 == 0) {
10548 err = 0;
10549 goto out;
10550 }
10551
10552 err = -EIO;
10553 goto out;
10554 }
10555
10556 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10557 TG3_EEPROM_MAGIC_HW) {
10558 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10559 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10560 u8 *buf8 = (u8 *) buf;
10561
10562 /* Separate the parity bits and the data bytes. */
10563 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10564 if ((i == 0) || (i == 8)) {
10565 int l;
10566 u8 msk;
10567
10568 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10569 parity[k++] = buf8[i] & msk;
10570 i++;
10571 } else if (i == 16) {
10572 int l;
10573 u8 msk;
10574
10575 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10576 parity[k++] = buf8[i] & msk;
10577 i++;
10578
10579 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10580 parity[k++] = buf8[i] & msk;
10581 i++;
10582 }
10583 data[j++] = buf8[i];
10584 }
10585
10586 err = -EIO;
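/* A data byte combined with its stored parity bit must contain an
 * odd number of set bits (odd parity).
 */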
10587 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10588 u8 hw8 = hweight8(data[i]);
10589
10590 if ((hw8 & 0x1) && parity[i])
10591 goto out;
10592 else if (!(hw8 & 0x1) && !parity[i])
10593 goto out;
10594 }
10595 err = 0;
10596 goto out;
10597 }
10598
10599 err = -EIO;
10600
10601 /* Bootstrap checksum at offset 0x10 */
10602 csum = calc_crc((unsigned char *) buf, 0x10);
10603 if (csum != le32_to_cpu(buf[0x10/4]))
10604 goto out;
10605
10606 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10607 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10608 if (csum != le32_to_cpu(buf[0xfc/4]))
10609 goto out;
10610
10611 kfree(buf);
10612
10613 buf = tg3_vpd_readblock(tp);
10614 if (!buf)
10615 return -ENOMEM;
10616
10617 i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN,
10618 PCI_VPD_LRDT_RO_DATA);
10619 if (i > 0) {
10620 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
10621 if (j < 0)
10622 goto out;
10623
10624 if (i + PCI_VPD_LRDT_TAG_SIZE + j > TG3_NVM_VPD_LEN)
10625 goto out;
10626
10627 i += PCI_VPD_LRDT_TAG_SIZE;
10628 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
10629 PCI_VPD_RO_KEYWORD_CHKSUM);
10630 if (j > 0) {
10631 u8 csum8 = 0;
10632
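/* The VPD "RV" checksum keyword covers every byte from the start of
 * the VPD image through the checksum byte itself; the byte sum must
 * be zero.
 */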
10633 j += PCI_VPD_INFO_FLD_HDR_SIZE;
10634
10635 for (i = 0; i <= j; i++)
10636 csum8 += ((u8 *)buf)[i];
10637
10638 if (csum8)
10639 goto out;
10640 }
10641 }
10642
10643 err = 0;
10644
10645 out:
10646 kfree(buf);
10647 return err;
10648 }
10649
10650 #define TG3_SERDES_TIMEOUT_SEC 2
10651 #define TG3_COPPER_TIMEOUT_SEC 6
10652
10653 static int tg3_test_link(struct tg3 *tp)
10654 {
10655 int i, max;
10656
10657 if (!netif_running(tp->dev))
10658 return -ENODEV;
10659
10660 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10661 max = TG3_SERDES_TIMEOUT_SEC;
10662 else
10663 max = TG3_COPPER_TIMEOUT_SEC;
10664
10665 for (i = 0; i < max; i++) {
10666 if (netif_carrier_ok(tp->dev))
10667 return 0;
10668
10669 if (msleep_interruptible(1000))
10670 break;
10671 }
10672
10673 return -EIO;
10674 }
10675
10676 /* Only test the commonly used registers */
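/* For each table entry below, read_mask selects bits that must read
 * back unchanged (read-only) while write_mask selects bits that must
 * accept both an all-zeros and an all-ones write; other bits are
 * left alone.
 */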
10677 static int tg3_test_registers(struct tg3 *tp)
10678 {
10679 int i, is_5705, is_5750;
10680 u32 offset, read_mask, write_mask, val, save_val, read_val;
10681 static struct {
10682 u16 offset;
10683 u16 flags;
10684 #define TG3_FL_5705 0x1
10685 #define TG3_FL_NOT_5705 0x2
10686 #define TG3_FL_NOT_5788 0x4
10687 #define TG3_FL_NOT_5750 0x8
10688 u32 read_mask;
10689 u32 write_mask;
10690 } reg_tbl[] = {
10691 /* MAC Control Registers */
10692 { MAC_MODE, TG3_FL_NOT_5705,
10693 0x00000000, 0x00ef6f8c },
10694 { MAC_MODE, TG3_FL_5705,
10695 0x00000000, 0x01ef6b8c },
10696 { MAC_STATUS, TG3_FL_NOT_5705,
10697 0x03800107, 0x00000000 },
10698 { MAC_STATUS, TG3_FL_5705,
10699 0x03800100, 0x00000000 },
10700 { MAC_ADDR_0_HIGH, 0x0000,
10701 0x00000000, 0x0000ffff },
10702 { MAC_ADDR_0_LOW, 0x0000,
10703 0x00000000, 0xffffffff },
10704 { MAC_RX_MTU_SIZE, 0x0000,
10705 0x00000000, 0x0000ffff },
10706 { MAC_TX_MODE, 0x0000,
10707 0x00000000, 0x00000070 },
10708 { MAC_TX_LENGTHS, 0x0000,
10709 0x00000000, 0x00003fff },
10710 { MAC_RX_MODE, TG3_FL_NOT_5705,
10711 0x00000000, 0x000007fc },
10712 { MAC_RX_MODE, TG3_FL_5705,
10713 0x00000000, 0x000007dc },
10714 { MAC_HASH_REG_0, 0x0000,
10715 0x00000000, 0xffffffff },
10716 { MAC_HASH_REG_1, 0x0000,
10717 0x00000000, 0xffffffff },
10718 { MAC_HASH_REG_2, 0x0000,
10719 0x00000000, 0xffffffff },
10720 { MAC_HASH_REG_3, 0x0000,
10721 0x00000000, 0xffffffff },
10722
10723 /* Receive Data and Receive BD Initiator Control Registers. */
10724 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10725 0x00000000, 0xffffffff },
10726 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10727 0x00000000, 0xffffffff },
10728 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10729 0x00000000, 0x00000003 },
10730 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10731 0x00000000, 0xffffffff },
10732 { RCVDBDI_STD_BD+0, 0x0000,
10733 0x00000000, 0xffffffff },
10734 { RCVDBDI_STD_BD+4, 0x0000,
10735 0x00000000, 0xffffffff },
10736 { RCVDBDI_STD_BD+8, 0x0000,
10737 0x00000000, 0xffff0002 },
10738 { RCVDBDI_STD_BD+0xc, 0x0000,
10739 0x00000000, 0xffffffff },
10740
10741 /* Receive BD Initiator Control Registers. */
10742 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10743 0x00000000, 0xffffffff },
10744 { RCVBDI_STD_THRESH, TG3_FL_5705,
10745 0x00000000, 0x000003ff },
10746 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10747 0x00000000, 0xffffffff },
10748
10749 /* Host Coalescing Control Registers. */
10750 { HOSTCC_MODE, TG3_FL_NOT_5705,
10751 0x00000000, 0x00000004 },
10752 { HOSTCC_MODE, TG3_FL_5705,
10753 0x00000000, 0x000000f6 },
10754 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10755 0x00000000, 0xffffffff },
10756 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10757 0x00000000, 0x000003ff },
10758 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10759 0x00000000, 0xffffffff },
10760 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10761 0x00000000, 0x000003ff },
10762 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10763 0x00000000, 0xffffffff },
10764 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10765 0x00000000, 0x000000ff },
10766 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10767 0x00000000, 0xffffffff },
10768 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10769 0x00000000, 0x000000ff },
10770 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10771 0x00000000, 0xffffffff },
10772 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10773 0x00000000, 0xffffffff },
10774 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10775 0x00000000, 0xffffffff },
10776 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10777 0x00000000, 0x000000ff },
10778 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10779 0x00000000, 0xffffffff },
10780 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10781 0x00000000, 0x000000ff },
10782 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10783 0x00000000, 0xffffffff },
10784 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10785 0x00000000, 0xffffffff },
10786 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10787 0x00000000, 0xffffffff },
10788 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10789 0x00000000, 0xffffffff },
10790 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10791 0x00000000, 0xffffffff },
10792 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10793 0xffffffff, 0x00000000 },
10794 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10795 0xffffffff, 0x00000000 },
10796
10797 /* Buffer Manager Control Registers. */
10798 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10799 0x00000000, 0x007fff80 },
10800 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10801 0x00000000, 0x007fffff },
10802 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10803 0x00000000, 0x0000003f },
10804 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10805 0x00000000, 0x000001ff },
10806 { BUFMGR_MB_HIGH_WATER, 0x0000,
10807 0x00000000, 0x000001ff },
10808 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10809 0xffffffff, 0x00000000 },
10810 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10811 0xffffffff, 0x00000000 },
10812
10813 /* Mailbox Registers */
10814 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10815 0x00000000, 0x000001ff },
10816 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10817 0x00000000, 0x000001ff },
10818 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10819 0x00000000, 0x000007ff },
10820 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10821 0x00000000, 0x000001ff },
10822
10823 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10824 };
10825
10826 is_5705 = is_5750 = 0;
10827 if (tg3_flag(tp, 5705_PLUS)) {
10828 is_5705 = 1;
10829 if (tg3_flag(tp, 5750_PLUS))
10830 is_5750 = 1;
10831 }
10832
10833 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
10834 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
10835 continue;
10836
10837 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
10838 continue;
10839
10840 if (tg3_flag(tp, IS_5788) &&
10841 (reg_tbl[i].flags & TG3_FL_NOT_5788))
10842 continue;
10843
10844 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
10845 continue;
10846
10847 offset = (u32) reg_tbl[i].offset;
10848 read_mask = reg_tbl[i].read_mask;
10849 write_mask = reg_tbl[i].write_mask;
10850
10851 /* Save the original register content */
10852 save_val = tr32(offset);
10853
10854 /* Determine the read-only value. */
10855 read_val = save_val & read_mask;
10856
10857 /* Write zero to the register, then make sure the read-only bits
10858 * are not changed and the read/write bits are all zeros.
10859 */
10860 tw32(offset, 0);
10861
10862 val = tr32(offset);
10863
10864 /* Test the read-only and read/write bits. */
10865 if (((val & read_mask) != read_val) || (val & write_mask))
10866 goto out;
10867
10868 /* Write ones to all the bits defined by RdMask and WrMask, then
10869 * make sure the read-only bits are not changed and the
10870 * read/write bits are all ones.
10871 */
10872 tw32(offset, read_mask | write_mask);
10873
10874 val = tr32(offset);
10875
10876 /* Test the read-only bits. */
10877 if ((val & read_mask) != read_val)
10878 goto out;
10879
10880 /* Test the read/write bits. */
10881 if ((val & write_mask) != write_mask)
10882 goto out;
10883
10884 tw32(offset, save_val);
10885 }
10886
10887 return 0;
10888
10889 out:
10890 if (netif_msg_hw(tp))
10891 netdev_err(tp->dev,
10892 "Register test failed at offset %x\n", offset);
10893 tw32(offset, save_val);
10894 return -EIO;
10895 }
10896
10897 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10898 {
10899 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
10900 int i;
10901 u32 j;
10902
10903 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10904 for (j = 0; j < len; j += 4) {
10905 u32 val;
10906
10907 tg3_write_mem(tp, offset + j, test_pattern[i]);
10908 tg3_read_mem(tp, offset + j, &val);
10909 if (val != test_pattern[i])
10910 return -EIO;
10911 }
10912 }
10913 return 0;
10914 }
10915
10916 static int tg3_test_memory(struct tg3 *tp)
10917 {
10918 static struct mem_entry {
10919 u32 offset;
10920 u32 len;
10921 } mem_tbl_570x[] = {
10922 { 0x00000000, 0x00b50},
10923 { 0x00002000, 0x1c000},
10924 { 0xffffffff, 0x00000}
10925 }, mem_tbl_5705[] = {
10926 { 0x00000100, 0x0000c},
10927 { 0x00000200, 0x00008},
10928 { 0x00004000, 0x00800},
10929 { 0x00006000, 0x01000},
10930 { 0x00008000, 0x02000},
10931 { 0x00010000, 0x0e000},
10932 { 0xffffffff, 0x00000}
10933 }, mem_tbl_5755[] = {
10934 { 0x00000200, 0x00008},
10935 { 0x00004000, 0x00800},
10936 { 0x00006000, 0x00800},
10937 { 0x00008000, 0x02000},
10938 { 0x00010000, 0x0c000},
10939 { 0xffffffff, 0x00000}
10940 }, mem_tbl_5906[] = {
10941 { 0x00000200, 0x00008},
10942 { 0x00004000, 0x00400},
10943 { 0x00006000, 0x00400},
10944 { 0x00008000, 0x01000},
10945 { 0x00010000, 0x01000},
10946 { 0xffffffff, 0x00000}
10947 }, mem_tbl_5717[] = {
10948 { 0x00000200, 0x00008},
10949 { 0x00010000, 0x0a000},
10950 { 0x00020000, 0x13c00},
10951 { 0xffffffff, 0x00000}
10952 }, mem_tbl_57765[] = {
10953 { 0x00000200, 0x00008},
10954 { 0x00004000, 0x00800},
10955 { 0x00006000, 0x09800},
10956 { 0x00010000, 0x0a000},
10957 { 0xffffffff, 0x00000}
10958 };
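/* Each table above lists { offset, len } pairs of internal SRAM
 * windows that are safe to pattern-test on that ASIC family; an
 * offset of 0xffffffff terminates the table.
 */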
10959 struct mem_entry *mem_tbl;
10960 int err = 0;
10961 int i;
10962
10963 if (tg3_flag(tp, 5717_PLUS))
10964 mem_tbl = mem_tbl_5717;
10965 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
10966 mem_tbl = mem_tbl_57765;
10967 else if (tg3_flag(tp, 5755_PLUS))
10968 mem_tbl = mem_tbl_5755;
10969 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10970 mem_tbl = mem_tbl_5906;
10971 else if (tg3_flag(tp, 5705_PLUS))
10972 mem_tbl = mem_tbl_5705;
10973 else
10974 mem_tbl = mem_tbl_570x;
10975
10976 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10977 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
10978 if (err)
10979 break;
10980 }
10981
10982 return err;
10983 }
10984
10985 #define TG3_MAC_LOOPBACK 0
10986 #define TG3_PHY_LOOPBACK 1
10987 #define TG3_TSO_LOOPBACK 2
10988
10989 #define TG3_TSO_MSS 500
10990
10991 #define TG3_TSO_IP_HDR_LEN 20
10992 #define TG3_TSO_TCP_HDR_LEN 20
10993 #define TG3_TSO_TCP_OPT_LEN 12
10994
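/* Canned TSO test frame (minus the Ethernet MACs): a 2-byte
 * Ethertype (0x0800, IPv4), a 20-byte IPv4 header (10.0.0.1 ->
 * 10.0.0.2), then a 20-byte TCP header followed by 12 bytes of
 * options (two NOPs plus a 10-byte timestamp option).
 */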
10995 static const u8 tg3_tso_header[] = {
10996 0x08, 0x00,
10997 0x45, 0x00, 0x00, 0x00,
10998 0x00, 0x00, 0x40, 0x00,
10999 0x40, 0x06, 0x00, 0x00,
11000 0x0a, 0x00, 0x00, 0x01,
11001 0x0a, 0x00, 0x00, 0x02,
11002 0x0d, 0x00, 0xe0, 0x00,
11003 0x00, 0x00, 0x01, 0x00,
11004 0x00, 0x00, 0x02, 0x00,
11005 0x80, 0x10, 0x10, 0x00,
11006 0x14, 0x09, 0x00, 0x00,
11007 0x01, 0x01, 0x08, 0x0a,
11008 0x11, 0x11, 0x11, 0x11,
11009 0x11, 0x11, 0x11, 0x11,
11010 };
11011
11012 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
11013 {
11014 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
11015 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11016 struct sk_buff *skb, *rx_skb;
11017 u8 *tx_data;
11018 dma_addr_t map;
11019 int num_pkts, tx_len, rx_len, i, err;
11020 struct tg3_rx_buffer_desc *desc;
11021 struct tg3_napi *tnapi, *rnapi;
11022 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11023
11024 tnapi = &tp->napi[0];
11025 rnapi = &tp->napi[0];
11026 if (tp->irq_cnt > 1) {
11027 if (tg3_flag(tp, ENABLE_RSS))
11028 rnapi = &tp->napi[1];
11029 if (tg3_flag(tp, ENABLE_TSS))
11030 tnapi = &tp->napi[1];
11031 }
11032 coal_now = tnapi->coal_now | rnapi->coal_now;
11033
11034 if (loopback_mode == TG3_MAC_LOOPBACK) {
11035 /* HW errata - mac loopback fails in some cases on 5780.
11036 * Normal traffic and PHY loopback are not affected by
11037 * this erratum. Also, the MAC loopback test is deprecated for
11038 * all newer ASIC revisions.
11039 */
11040 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11041 tg3_flag(tp, CPMU_PRESENT))
11042 return 0;
11043
11044 mac_mode = tp->mac_mode &
11045 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11046 mac_mode |= MAC_MODE_PORT_INT_LPBACK;
11047 if (!tg3_flag(tp, 5705_PLUS))
11048 mac_mode |= MAC_MODE_LINK_POLARITY;
11049 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
11050 mac_mode |= MAC_MODE_PORT_MODE_MII;
11051 else
11052 mac_mode |= MAC_MODE_PORT_MODE_GMII;
11053 tw32(MAC_MODE, mac_mode);
11054 } else {
11055 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11056 tg3_phy_fet_toggle_apd(tp, false);
11057 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
11058 } else
11059 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
11060
11061 tg3_phy_toggle_automdix(tp, 0);
11062
11063 tg3_writephy(tp, MII_BMCR, val);
11064 udelay(40);
11065
11066 mac_mode = tp->mac_mode &
11067 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11068 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11069 tg3_writephy(tp, MII_TG3_FET_PTEST,
11070 MII_TG3_FET_PTEST_FRC_TX_LINK |
11071 MII_TG3_FET_PTEST_FRC_TX_LOCK);
11072 /* The write needs to be flushed for the AC131 */
11073 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11074 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
11075 mac_mode |= MAC_MODE_PORT_MODE_MII;
11076 } else
11077 mac_mode |= MAC_MODE_PORT_MODE_GMII;
11078
11079 /* reset to prevent intermittently losing the 1st rx packet */
11080 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
11081 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
11082 udelay(10);
11083 tw32_f(MAC_RX_MODE, tp->rx_mode);
11084 }
11085 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
11086 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
11087 if (masked_phy_id == TG3_PHY_ID_BCM5401)
11088 mac_mode &= ~MAC_MODE_LINK_POLARITY;
11089 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
11090 mac_mode |= MAC_MODE_LINK_POLARITY;
11091 tg3_writephy(tp, MII_TG3_EXT_CTRL,
11092 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
11093 }
11094 tw32(MAC_MODE, mac_mode);
11095
11096 /* Wait for link */
11097 for (i = 0; i < 100; i++) {
11098 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11099 break;
11100 mdelay(1);
11101 }
11102 }
11103
11104 err = -EIO;
11105
11106 tx_len = pktsz;
11107 skb = netdev_alloc_skb(tp->dev, tx_len);
11108 if (!skb)
11109 return -ENOMEM;
11110
11111 tx_data = skb_put(skb, tx_len);
11112 memcpy(tx_data, tp->dev->dev_addr, 6);
11113 memset(tx_data + 6, 0x0, 8);
11114
11115 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11116
11117 if (loopback_mode == TG3_TSO_LOOPBACK) {
11118 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11119
11120 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11121 TG3_TSO_TCP_OPT_LEN;
11122
11123 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11124 sizeof(tg3_tso_header));
11125 mss = TG3_TSO_MSS;
11126
11127 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11128 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11129
11130 /* Set the total length field in the IP header */
11131 iph->tot_len = htons((u16)(mss + hdr_len));
11132
11133 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11134 TXD_FLAG_CPU_POST_DMA);
11135
11136 if (tg3_flag(tp, HW_TSO_1) ||
11137 tg3_flag(tp, HW_TSO_2) ||
11138 tg3_flag(tp, HW_TSO_3)) {
11139 struct tcphdr *th;
11140 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11141 th = (struct tcphdr *)&tx_data[val];
11142 th->check = 0;
11143 } else
11144 base_flags |= TXD_FLAG_TCPUDP_CSUM;
11145
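/* Each hardware TSO generation encodes the header length
 * differently: HW_TSO_3 scatters it across mss and descriptor flag
 * bits, HW_TSO_2 packs it into the upper mss bits, and older parts
 * are given only the TCP option length.
 */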
11146 if (tg3_flag(tp, HW_TSO_3)) {
11147 mss |= (hdr_len & 0xc) << 12;
11148 if (hdr_len & 0x10)
11149 base_flags |= 0x00000010;
11150 base_flags |= (hdr_len & 0x3e0) << 5;
11151 } else if (tg3_flag(tp, HW_TSO_2))
11152 mss |= hdr_len << 9;
11153 else if (tg3_flag(tp, HW_TSO_1) ||
11154 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11155 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11156 } else {
11157 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11158 }
11159
11160 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11161 } else {
11162 num_pkts = 1;
11163 data_off = ETH_HLEN;
11164 }
11165
11166 for (i = data_off; i < tx_len; i++)
11167 tx_data[i] = (u8) (i & 0xff);
11168
11169 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11170 if (pci_dma_mapping_error(tp->pdev, map)) {
11171 dev_kfree_skb(skb);
11172 return -EIO;
11173 }
11174
11175 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11176 rnapi->coal_now);
11177
11178 udelay(10);
11179
11180 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11181
11182 tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len,
11183 base_flags, (mss << 1) | 1);
11184
11185 tnapi->tx_prod++;
11186
11187 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11188 tr32_mailbox(tnapi->prodmbox);
11189
11190 udelay(10);
11191
11192 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
11193 for (i = 0; i < 35; i++) {
11194 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11195 coal_now);
11196
11197 udelay(10);
11198
11199 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11200 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11201 if ((tx_idx == tnapi->tx_prod) &&
11202 (rx_idx == (rx_start_idx + num_pkts)))
11203 break;
11204 }
11205
11206 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
11207 dev_kfree_skb(skb);
11208
11209 if (tx_idx != tnapi->tx_prod)
11210 goto out;
11211
11212 if (rx_idx != rx_start_idx + num_pkts)
11213 goto out;
11214
11215 val = data_off;
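/* Walk every rx return descriptor the test produced and verify that
 * the payload matches the (i & 0xff) pattern written on the tx
 * side.
 */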
11216 while (rx_idx != rx_start_idx) {
11217 desc = &rnapi->rx_rcb[rx_start_idx++];
11218 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11219 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11220
11221 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11222 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11223 goto out;
11224
11225 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11226 - ETH_FCS_LEN;
11227
11228 if (loopback_mode != TG3_TSO_LOOPBACK) {
11229 if (rx_len != tx_len)
11230 goto out;
11231
11232 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11233 if (opaque_key != RXD_OPAQUE_RING_STD)
11234 goto out;
11235 } else {
11236 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11237 goto out;
11238 }
11239 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11240 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11241 >> RXD_TCPCSUM_SHIFT != 0xffff) {
11242 goto out;
11243 }
11244
11245 if (opaque_key == RXD_OPAQUE_RING_STD) {
11246 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11247 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11248 mapping);
11249 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11250 rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11251 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11252 mapping);
11253 } else
11254 goto out;
11255
11256 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11257 PCI_DMA_FROMDEVICE);
11258
11259 for (i = data_off; i < rx_len; i++, val++) {
11260 if (*(rx_skb->data + i) != (u8) (val & 0xff))
11261 goto out;
11262 }
11263 }
11264
11265 err = 0;
11266
11267 /* tg3_free_rings will unmap and free the rx_skb */
11268 out:
11269 return err;
11270 }
11271
11272 #define TG3_STD_LOOPBACK_FAILED 1
11273 #define TG3_JMB_LOOPBACK_FAILED 2
11274 #define TG3_TSO_LOOPBACK_FAILED 4
11275
11276 #define TG3_MAC_LOOPBACK_SHIFT 0
11277 #define TG3_PHY_LOOPBACK_SHIFT 4
11278 #define TG3_LOOPBACK_FAILED 0x00000077
11279
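/* Loopback results are reported as a bitmask with one nibble per
 * loopback mode (MAC in bits 3:0, PHY in bits 7:4); the STD/JMB/TSO
 * failure bits are set within each nibble.
 */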
11280 static int tg3_test_loopback(struct tg3 *tp)
11281 {
11282 int err = 0;
11283 u32 eee_cap, cpmuctrl = 0;
11284
11285 if (!netif_running(tp->dev))
11286 return TG3_LOOPBACK_FAILED;
11287
11288 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11289 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11290
11291 err = tg3_reset_hw(tp, 1);
11292 if (err) {
11293 err = TG3_LOOPBACK_FAILED;
11294 goto done;
11295 }
11296
11297 if (tg3_flag(tp, ENABLE_RSS)) {
11298 int i;
11299
11300 /* Reroute all rx packets to the 1st queue */
11301 for (i = MAC_RSS_INDIR_TBL_0;
11302 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11303 tw32(i, 0x0);
11304 }
11305
11306 /* Turn off gphy autopowerdown. */
11307 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11308 tg3_phy_toggle_apd(tp, false);
11309
11310 if (tg3_flag(tp, CPMU_PRESENT)) {
11311 int i;
11312 u32 status;
11313
11314 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
11315
11316 /* Wait for up to 40 microseconds to acquire lock. */
11317 for (i = 0; i < 4; i++) {
11318 status = tr32(TG3_CPMU_MUTEX_GNT);
11319 if (status == CPMU_MUTEX_GNT_DRIVER)
11320 break;
11321 udelay(10);
11322 }
11323
11324 if (status != CPMU_MUTEX_GNT_DRIVER) {
11325 err = TG3_LOOPBACK_FAILED;
11326 goto done;
11327 }
11328
11329 /* Turn off link-based power management. */
11330 cpmuctrl = tr32(TG3_CPMU_CTRL);
11331 tw32(TG3_CPMU_CTRL,
11332 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
11333 CPMU_CTRL_LINK_AWARE_MODE));
11334 }
11335
11336 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
11337 err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11338
11339 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11340 tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
11341 err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11342
11343 if (tg3_flag(tp, CPMU_PRESENT)) {
11344 tw32(TG3_CPMU_CTRL, cpmuctrl);
11345
11346 /* Release the mutex */
11347 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
11348 }
11349
11350 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11351 !tg3_flag(tp, USE_PHYLIB)) {
11352 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
11353 err |= TG3_STD_LOOPBACK_FAILED <<
11354 TG3_PHY_LOOPBACK_SHIFT;
11355 if (tg3_flag(tp, TSO_CAPABLE) &&
11356 tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
11357 err |= TG3_TSO_LOOPBACK_FAILED <<
11358 TG3_PHY_LOOPBACK_SHIFT;
11359 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11360 tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
11361 err |= TG3_JMB_LOOPBACK_FAILED <<
11362 TG3_PHY_LOOPBACK_SHIFT;
11363 }
11364
11365 /* Re-enable gphy autopowerdown. */
11366 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11367 tg3_phy_toggle_apd(tp, true);
11368
11369 done:
11370 tp->phy_flags |= eee_cap;
11371
11372 return err;
11373 }
11374
11375 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11376 u64 *data)
11377 {
11378 struct tg3 *tp = netdev_priv(dev);
11379
11380 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11381 tg3_power_up(tp);
11382
11383 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11384
11385 if (tg3_test_nvram(tp) != 0) {
11386 etest->flags |= ETH_TEST_FL_FAILED;
11387 data[0] = 1;
11388 }
11389 if (tg3_test_link(tp) != 0) {
11390 etest->flags |= ETH_TEST_FL_FAILED;
11391 data[1] = 1;
11392 }
11393 if (etest->flags & ETH_TEST_FL_OFFLINE) {
11394 int err, err2 = 0, irq_sync = 0;
11395
11396 if (netif_running(dev)) {
11397 tg3_phy_stop(tp);
11398 tg3_netif_stop(tp);
11399 irq_sync = 1;
11400 }
11401
11402 tg3_full_lock(tp, irq_sync);
11403
11404 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11405 err = tg3_nvram_lock(tp);
11406 tg3_halt_cpu(tp, RX_CPU_BASE);
11407 if (!tg3_flag(tp, 5705_PLUS))
11408 tg3_halt_cpu(tp, TX_CPU_BASE);
11409 if (!err)
11410 tg3_nvram_unlock(tp);
11411
11412 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11413 tg3_phy_reset(tp);
11414
11415 if (tg3_test_registers(tp) != 0) {
11416 etest->flags |= ETH_TEST_FL_FAILED;
11417 data[2] = 1;
11418 }
11419 if (tg3_test_memory(tp) != 0) {
11420 etest->flags |= ETH_TEST_FL_FAILED;
11421 data[3] = 1;
11422 }
11423 if ((data[4] = tg3_test_loopback(tp)) != 0)
11424 etest->flags |= ETH_TEST_FL_FAILED;
11425
11426 tg3_full_unlock(tp);
11427
11428 if (tg3_test_interrupt(tp) != 0) {
11429 etest->flags |= ETH_TEST_FL_FAILED;
11430 data[5] = 1;
11431 }
11432
11433 tg3_full_lock(tp, 0);
11434
11435 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11436 if (netif_running(dev)) {
11437 tg3_flag_set(tp, INIT_COMPLETE);
11438 err2 = tg3_restart_hw(tp, 1);
11439 if (!err2)
11440 tg3_netif_start(tp);
11441 }
11442
11443 tg3_full_unlock(tp);
11444
11445 if (irq_sync && !err2)
11446 tg3_phy_start(tp);
11447 }
11448 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11449 tg3_power_down(tp);
11450
11451 }
11452
11453 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11454 {
11455 struct mii_ioctl_data *data = if_mii(ifr);
11456 struct tg3 *tp = netdev_priv(dev);
11457 int err;
11458
11459 if (tg3_flag(tp, USE_PHYLIB)) {
11460 struct phy_device *phydev;
11461 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11462 return -EAGAIN;
11463 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11464 return phy_mii_ioctl(phydev, ifr, cmd);
11465 }
11466
11467 switch (cmd) {
11468 case SIOCGMIIPHY:
11469 data->phy_id = tp->phy_addr;
11470
11471 /* fallthru */
11472 case SIOCGMIIREG: {
11473 u32 mii_regval;
11474
11475 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11476 break; /* We have no PHY */
11477
11478 if (!netif_running(dev))
11479 return -EAGAIN;
11480
11481 spin_lock_bh(&tp->lock);
11482 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11483 spin_unlock_bh(&tp->lock);
11484
11485 data->val_out = mii_regval;
11486
11487 return err;
11488 }
11489
11490 case SIOCSMIIREG:
11491 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11492 break; /* We have no PHY */
11493
11494 if (!netif_running(dev))
11495 return -EAGAIN;
11496
11497 spin_lock_bh(&tp->lock);
11498 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11499 spin_unlock_bh(&tp->lock);
11500
11501 return err;
11502
11503 default:
11504 /* do nothing */
11505 break;
11506 }
11507 return -EOPNOTSUPP;
11508 }
11509
11510 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11511 {
11512 struct tg3 *tp = netdev_priv(dev);
11513
11514 memcpy(ec, &tp->coal, sizeof(*ec));
11515 return 0;
11516 }
11517
11518 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11519 {
11520 struct tg3 *tp = netdev_priv(dev);
11521 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11522 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11523
11524 if (!tg3_flag(tp, 5705_PLUS)) {
11525 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11526 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11527 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11528 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11529 }
11530
11531 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11532 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11533 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11534 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11535 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11536 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11537 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11538 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11539 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11540 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11541 return -EINVAL;
11542
11543 /* No rx interrupts will be generated if both are zero */
11544 if ((ec->rx_coalesce_usecs == 0) &&
11545 (ec->rx_max_coalesced_frames == 0))
11546 return -EINVAL;
11547
11548 /* No tx interrupts will be generated if both are zero */
11549 if ((ec->tx_coalesce_usecs == 0) &&
11550 (ec->tx_max_coalesced_frames == 0))
11551 return -EINVAL;
11552
11553 /* Only copy relevant parameters, ignore all others. */
11554 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11555 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11556 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11557 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11558 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11559 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11560 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11561 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11562 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11563
11564 if (netif_running(dev)) {
11565 tg3_full_lock(tp, 0);
11566 __tg3_set_coalesce(tp, &tp->coal);
11567 tg3_full_unlock(tp);
11568 }
11569 return 0;
11570 }
11571
11572 static const struct ethtool_ops tg3_ethtool_ops = {
11573 .get_settings = tg3_get_settings,
11574 .set_settings = tg3_set_settings,
11575 .get_drvinfo = tg3_get_drvinfo,
11576 .get_regs_len = tg3_get_regs_len,
11577 .get_regs = tg3_get_regs,
11578 .get_wol = tg3_get_wol,
11579 .set_wol = tg3_set_wol,
11580 .get_msglevel = tg3_get_msglevel,
11581 .set_msglevel = tg3_set_msglevel,
11582 .nway_reset = tg3_nway_reset,
11583 .get_link = ethtool_op_get_link,
11584 .get_eeprom_len = tg3_get_eeprom_len,
11585 .get_eeprom = tg3_get_eeprom,
11586 .set_eeprom = tg3_set_eeprom,
11587 .get_ringparam = tg3_get_ringparam,
11588 .set_ringparam = tg3_set_ringparam,
11589 .get_pauseparam = tg3_get_pauseparam,
11590 .set_pauseparam = tg3_set_pauseparam,
11591 .self_test = tg3_self_test,
11592 .get_strings = tg3_get_strings,
11593 .set_phys_id = tg3_set_phys_id,
11594 .get_ethtool_stats = tg3_get_ethtool_stats,
11595 .get_coalesce = tg3_get_coalesce,
11596 .set_coalesce = tg3_set_coalesce,
11597 .get_sset_count = tg3_get_sset_count,
11598 };
11599
11600 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11601 {
11602 u32 cursize, val, magic;
11603
11604 tp->nvram_size = EEPROM_CHIP_SIZE;
11605
11606 if (tg3_nvram_read(tp, 0, &magic) != 0)
11607 return;
11608
11609 if ((magic != TG3_EEPROM_MAGIC) &&
11610 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11611 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11612 return;
11613
11614 /*
11615 * Size the chip by reading offsets at increasing powers of two.
11616 * When we encounter our validation signature, we know the addressing
11617 * has wrapped around, and thus have our chip size.
11618 */
11619 cursize = 0x10;
11620
11621 while (cursize < tp->nvram_size) {
11622 if (tg3_nvram_read(tp, cursize, &val) != 0)
11623 return;
11624
11625 if (val == magic)
11626 break;
11627
11628 cursize <<= 1;
11629 }
11630
11631 tp->nvram_size = cursize;
11632 }
11633
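/* Determine the total NVRAM size.  Selfboot images (no standard magic)
 * are sized by tg3_get_eeprom_size(); otherwise the size is taken from
 * the 16-bit field at offset 0xf2, falling back to 512KB.
 */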
11634 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11635 {
11636 u32 val;
11637
11638 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11639 return;
11640
11641 /* Selfboot format */
11642 if (val != TG3_EEPROM_MAGIC) {
11643 tg3_get_eeprom_size(tp);
11644 return;
11645 }
11646
11647 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11648 if (val != 0) {
11649 /* This is confusing. We want to operate on the
11650 * 16-bit value at offset 0xf2. The tg3_nvram_read()
11651 * call will read from NVRAM and byteswap the data
11652 * according to the byteswapping settings for all
11653 * other register accesses. This ensures the data we
11654 * want will always reside in the lower 16-bits.
11655 * However, the data in NVRAM is in LE format, which
11656 * means the data from the NVRAM read will always be
11657 * opposite the endianness of the CPU. The 16-bit
11658 * byteswap then brings the data to CPU endianness.
11659 */
11660 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11661 return;
11662 }
11663 }
11664 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11665 }
11666
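/* Decode NVRAM_CFG1 on pre-5752 chips to identify the flash or EEPROM
 * vendor and page size.
 */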
11667 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11668 {
11669 u32 nvcfg1;
11670
11671 nvcfg1 = tr32(NVRAM_CFG1);
11672 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11673 tg3_flag_set(tp, FLASH);
11674 } else {
11675 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11676 tw32(NVRAM_CFG1, nvcfg1);
11677 }
11678
11679 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11680 tg3_flag(tp, 5780_CLASS)) {
11681 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11682 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11683 tp->nvram_jedecnum = JEDEC_ATMEL;
11684 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11685 tg3_flag_set(tp, NVRAM_BUFFERED);
11686 break;
11687 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11688 tp->nvram_jedecnum = JEDEC_ATMEL;
11689 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11690 break;
11691 case FLASH_VENDOR_ATMEL_EEPROM:
11692 tp->nvram_jedecnum = JEDEC_ATMEL;
11693 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11694 tg3_flag_set(tp, NVRAM_BUFFERED);
11695 break;
11696 case FLASH_VENDOR_ST:
11697 tp->nvram_jedecnum = JEDEC_ST;
11698 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11699 tg3_flag_set(tp, NVRAM_BUFFERED);
11700 break;
11701 case FLASH_VENDOR_SAIFUN:
11702 tp->nvram_jedecnum = JEDEC_SAIFUN;
11703 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11704 break;
11705 case FLASH_VENDOR_SST_SMALL:
11706 case FLASH_VENDOR_SST_LARGE:
11707 tp->nvram_jedecnum = JEDEC_SST;
11708 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11709 break;
11710 }
11711 } else {
11712 tp->nvram_jedecnum = JEDEC_ATMEL;
11713 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11714 tg3_flag_set(tp, NVRAM_BUFFERED);
11715 }
11716 }
11717
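/* Translate the 5752-style page size field of NVRAM_CFG1 into bytes. */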
11718 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11719 {
11720 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11721 case FLASH_5752PAGE_SIZE_256:
11722 tp->nvram_pagesize = 256;
11723 break;
11724 case FLASH_5752PAGE_SIZE_512:
11725 tp->nvram_pagesize = 512;
11726 break;
11727 case FLASH_5752PAGE_SIZE_1K:
11728 tp->nvram_pagesize = 1024;
11729 break;
11730 case FLASH_5752PAGE_SIZE_2K:
11731 tp->nvram_pagesize = 2048;
11732 break;
11733 case FLASH_5752PAGE_SIZE_4K:
11734 tp->nvram_pagesize = 4096;
11735 break;
11736 case FLASH_5752PAGE_SIZE_264:
11737 tp->nvram_pagesize = 264;
11738 break;
11739 case FLASH_5752PAGE_SIZE_528:
11740 tp->nvram_pagesize = 528;
11741 break;
11742 }
11743 }
11744
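/* The tg3_get_*_nvram_info() routines below decode NVRAM_CFG1 for the
 * later ASIC families, identifying the flash vendor, page size, and
 * (where encoded) the total NVRAM size.
 */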
11745 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11746 {
11747 u32 nvcfg1;
11748
11749 nvcfg1 = tr32(NVRAM_CFG1);
11750
11751 /* NVRAM protection for TPM */
11752 if (nvcfg1 & (1 << 27))
11753 tg3_flag_set(tp, PROTECTED_NVRAM);
11754
11755 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11756 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11757 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11758 tp->nvram_jedecnum = JEDEC_ATMEL;
11759 tg3_flag_set(tp, NVRAM_BUFFERED);
11760 break;
11761 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11762 tp->nvram_jedecnum = JEDEC_ATMEL;
11763 tg3_flag_set(tp, NVRAM_BUFFERED);
11764 tg3_flag_set(tp, FLASH);
11765 break;
11766 case FLASH_5752VENDOR_ST_M45PE10:
11767 case FLASH_5752VENDOR_ST_M45PE20:
11768 case FLASH_5752VENDOR_ST_M45PE40:
11769 tp->nvram_jedecnum = JEDEC_ST;
11770 tg3_flag_set(tp, NVRAM_BUFFERED);
11771 tg3_flag_set(tp, FLASH);
11772 break;
11773 }
11774
11775 if (tg3_flag(tp, FLASH)) {
11776 tg3_nvram_get_pagesize(tp, nvcfg1);
11777 } else {
11778 /* For eeprom, set pagesize to maximum eeprom size */
11779 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11780
11781 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11782 tw32(NVRAM_CFG1, nvcfg1);
11783 }
11784 }
11785
11786 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11787 {
11788 u32 nvcfg1, protect = 0;
11789
11790 nvcfg1 = tr32(NVRAM_CFG1);
11791
11792 /* NVRAM protection for TPM */
11793 if (nvcfg1 & (1 << 27)) {
11794 tg3_flag_set(tp, PROTECTED_NVRAM);
11795 protect = 1;
11796 }
11797
11798 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11799 switch (nvcfg1) {
11800 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11801 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11802 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11803 case FLASH_5755VENDOR_ATMEL_FLASH_5:
11804 tp->nvram_jedecnum = JEDEC_ATMEL;
11805 tg3_flag_set(tp, NVRAM_BUFFERED);
11806 tg3_flag_set(tp, FLASH);
11807 tp->nvram_pagesize = 264;
11808 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11809 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
11810 tp->nvram_size = (protect ? 0x3e200 :
11811 TG3_NVRAM_SIZE_512KB);
11812 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
11813 tp->nvram_size = (protect ? 0x1f200 :
11814 TG3_NVRAM_SIZE_256KB);
11815 else
11816 tp->nvram_size = (protect ? 0x1f200 :
11817 TG3_NVRAM_SIZE_128KB);
11818 break;
11819 case FLASH_5752VENDOR_ST_M45PE10:
11820 case FLASH_5752VENDOR_ST_M45PE20:
11821 case FLASH_5752VENDOR_ST_M45PE40:
11822 tp->nvram_jedecnum = JEDEC_ST;
11823 tg3_flag_set(tp, NVRAM_BUFFERED);
11824 tg3_flag_set(tp, FLASH);
11825 tp->nvram_pagesize = 256;
11826 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
11827 tp->nvram_size = (protect ?
11828 TG3_NVRAM_SIZE_64KB :
11829 TG3_NVRAM_SIZE_128KB);
11830 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
11831 tp->nvram_size = (protect ?
11832 TG3_NVRAM_SIZE_64KB :
11833 TG3_NVRAM_SIZE_256KB);
11834 else
11835 tp->nvram_size = (protect ?
11836 TG3_NVRAM_SIZE_128KB :
11837 TG3_NVRAM_SIZE_512KB);
11838 break;
11839 }
11840 }
11841
11842 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
11843 {
11844 u32 nvcfg1;
11845
11846 nvcfg1 = tr32(NVRAM_CFG1);
11847
11848 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11849 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
11850 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11851 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
11852 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11853 tp->nvram_jedecnum = JEDEC_ATMEL;
11854 tg3_flag_set(tp, NVRAM_BUFFERED);
11855 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11856
11857 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11858 tw32(NVRAM_CFG1, nvcfg1);
11859 break;
11860 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11861 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11862 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11863 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11864 tp->nvram_jedecnum = JEDEC_ATMEL;
11865 tg3_flag_set(tp, NVRAM_BUFFERED);
11866 tg3_flag_set(tp, FLASH);
11867 tp->nvram_pagesize = 264;
11868 break;
11869 case FLASH_5752VENDOR_ST_M45PE10:
11870 case FLASH_5752VENDOR_ST_M45PE20:
11871 case FLASH_5752VENDOR_ST_M45PE40:
11872 tp->nvram_jedecnum = JEDEC_ST;
11873 tg3_flag_set(tp, NVRAM_BUFFERED);
11874 tg3_flag_set(tp, FLASH);
11875 tp->nvram_pagesize = 256;
11876 break;
11877 }
11878 }
11879
11880 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11881 {
11882 u32 nvcfg1, protect = 0;
11883
11884 nvcfg1 = tr32(NVRAM_CFG1);
11885
11886 /* NVRAM protection for TPM */
11887 if (nvcfg1 & (1 << 27)) {
11888 tg3_flag_set(tp, PROTECTED_NVRAM);
11889 protect = 1;
11890 }
11891
11892 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11893 switch (nvcfg1) {
11894 case FLASH_5761VENDOR_ATMEL_ADB021D:
11895 case FLASH_5761VENDOR_ATMEL_ADB041D:
11896 case FLASH_5761VENDOR_ATMEL_ADB081D:
11897 case FLASH_5761VENDOR_ATMEL_ADB161D:
11898 case FLASH_5761VENDOR_ATMEL_MDB021D:
11899 case FLASH_5761VENDOR_ATMEL_MDB041D:
11900 case FLASH_5761VENDOR_ATMEL_MDB081D:
11901 case FLASH_5761VENDOR_ATMEL_MDB161D:
11902 tp->nvram_jedecnum = JEDEC_ATMEL;
11903 tg3_flag_set(tp, NVRAM_BUFFERED);
11904 tg3_flag_set(tp, FLASH);
11905 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
11906 tp->nvram_pagesize = 256;
11907 break;
11908 case FLASH_5761VENDOR_ST_A_M45PE20:
11909 case FLASH_5761VENDOR_ST_A_M45PE40:
11910 case FLASH_5761VENDOR_ST_A_M45PE80:
11911 case FLASH_5761VENDOR_ST_A_M45PE16:
11912 case FLASH_5761VENDOR_ST_M_M45PE20:
11913 case FLASH_5761VENDOR_ST_M_M45PE40:
11914 case FLASH_5761VENDOR_ST_M_M45PE80:
11915 case FLASH_5761VENDOR_ST_M_M45PE16:
11916 tp->nvram_jedecnum = JEDEC_ST;
11917 tg3_flag_set(tp, NVRAM_BUFFERED);
11918 tg3_flag_set(tp, FLASH);
11919 tp->nvram_pagesize = 256;
11920 break;
11921 }
11922
11923 if (protect) {
11924 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
11925 } else {
11926 switch (nvcfg1) {
11927 case FLASH_5761VENDOR_ATMEL_ADB161D:
11928 case FLASH_5761VENDOR_ATMEL_MDB161D:
11929 case FLASH_5761VENDOR_ST_A_M45PE16:
11930 case FLASH_5761VENDOR_ST_M_M45PE16:
11931 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
11932 break;
11933 case FLASH_5761VENDOR_ATMEL_ADB081D:
11934 case FLASH_5761VENDOR_ATMEL_MDB081D:
11935 case FLASH_5761VENDOR_ST_A_M45PE80:
11936 case FLASH_5761VENDOR_ST_M_M45PE80:
11937 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
11938 break;
11939 case FLASH_5761VENDOR_ATMEL_ADB041D:
11940 case FLASH_5761VENDOR_ATMEL_MDB041D:
11941 case FLASH_5761VENDOR_ST_A_M45PE40:
11942 case FLASH_5761VENDOR_ST_M_M45PE40:
11943 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11944 break;
11945 case FLASH_5761VENDOR_ATMEL_ADB021D:
11946 case FLASH_5761VENDOR_ATMEL_MDB021D:
11947 case FLASH_5761VENDOR_ST_A_M45PE20:
11948 case FLASH_5761VENDOR_ST_M_M45PE20:
11949 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11950 break;
11951 }
11952 }
11953 }
11954
11955 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
11956 {
11957 tp->nvram_jedecnum = JEDEC_ATMEL;
11958 tg3_flag_set(tp, NVRAM_BUFFERED);
11959 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11960 }
11961
11962 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
11963 {
11964 u32 nvcfg1;
11965
11966 nvcfg1 = tr32(NVRAM_CFG1);
11967
11968 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11969 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11970 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11971 tp->nvram_jedecnum = JEDEC_ATMEL;
11972 tg3_flag_set(tp, NVRAM_BUFFERED);
11973 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11974
11975 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11976 tw32(NVRAM_CFG1, nvcfg1);
11977 return;
11978 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11979 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11980 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11981 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11982 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11983 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11984 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11985 tp->nvram_jedecnum = JEDEC_ATMEL;
11986 tg3_flag_set(tp, NVRAM_BUFFERED);
11987 tg3_flag_set(tp, FLASH);
11988
11989 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11990 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11991 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11992 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11993 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11994 break;
11995 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11996 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11997 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11998 break;
11999 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12000 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12001 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12002 break;
12003 }
12004 break;
12005 case FLASH_5752VENDOR_ST_M45PE10:
12006 case FLASH_5752VENDOR_ST_M45PE20:
12007 case FLASH_5752VENDOR_ST_M45PE40:
12008 tp->nvram_jedecnum = JEDEC_ST;
12009 tg3_flag_set(tp, NVRAM_BUFFERED);
12010 tg3_flag_set(tp, FLASH);
12011
12012 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12013 case FLASH_5752VENDOR_ST_M45PE10:
12014 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12015 break;
12016 case FLASH_5752VENDOR_ST_M45PE20:
12017 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12018 break;
12019 case FLASH_5752VENDOR_ST_M45PE40:
12020 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12021 break;
12022 }
12023 break;
12024 default:
12025 tg3_flag_set(tp, NO_NVRAM);
12026 return;
12027 }
12028
12029 tg3_nvram_get_pagesize(tp, nvcfg1);
12030 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12031 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12032 }
12033
12035 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12036 {
12037 u32 nvcfg1;
12038
12039 nvcfg1 = tr32(NVRAM_CFG1);
12040
12041 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12042 case FLASH_5717VENDOR_ATMEL_EEPROM:
12043 case FLASH_5717VENDOR_MICRO_EEPROM:
12044 tp->nvram_jedecnum = JEDEC_ATMEL;
12045 tg3_flag_set(tp, NVRAM_BUFFERED);
12046 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12047
12048 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12049 tw32(NVRAM_CFG1, nvcfg1);
12050 return;
12051 case FLASH_5717VENDOR_ATMEL_MDB011D:
12052 case FLASH_5717VENDOR_ATMEL_ADB011B:
12053 case FLASH_5717VENDOR_ATMEL_ADB011D:
12054 case FLASH_5717VENDOR_ATMEL_MDB021D:
12055 case FLASH_5717VENDOR_ATMEL_ADB021B:
12056 case FLASH_5717VENDOR_ATMEL_ADB021D:
12057 case FLASH_5717VENDOR_ATMEL_45USPT:
12058 tp->nvram_jedecnum = JEDEC_ATMEL;
12059 tg3_flag_set(tp, NVRAM_BUFFERED);
12060 tg3_flag_set(tp, FLASH);
12061
12062 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12063 case FLASH_5717VENDOR_ATMEL_MDB021D:
12064 /* Detect size with tg3_nvram_get_size() */
12065 break;
12066 case FLASH_5717VENDOR_ATMEL_ADB021B:
12067 case FLASH_5717VENDOR_ATMEL_ADB021D:
12068 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12069 break;
12070 default:
12071 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12072 break;
12073 }
12074 break;
12075 case FLASH_5717VENDOR_ST_M_M25PE10:
12076 case FLASH_5717VENDOR_ST_A_M25PE10:
12077 case FLASH_5717VENDOR_ST_M_M45PE10:
12078 case FLASH_5717VENDOR_ST_A_M45PE10:
12079 case FLASH_5717VENDOR_ST_M_M25PE20:
12080 case FLASH_5717VENDOR_ST_A_M25PE20:
12081 case FLASH_5717VENDOR_ST_M_M45PE20:
12082 case FLASH_5717VENDOR_ST_A_M45PE20:
12083 case FLASH_5717VENDOR_ST_25USPT:
12084 case FLASH_5717VENDOR_ST_45USPT:
12085 tp->nvram_jedecnum = JEDEC_ST;
12086 tg3_flag_set(tp, NVRAM_BUFFERED);
12087 tg3_flag_set(tp, FLASH);
12088
12089 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12090 case FLASH_5717VENDOR_ST_M_M25PE20:
12091 case FLASH_5717VENDOR_ST_M_M45PE20:
12092 /* Detect size with tg3_nvram_get_size() */
12093 break;
12094 case FLASH_5717VENDOR_ST_A_M25PE20:
12095 case FLASH_5717VENDOR_ST_A_M45PE20:
12096 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12097 break;
12098 default:
12099 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12100 break;
12101 }
12102 break;
12103 default:
12104 tg3_flag_set(tp, NO_NVRAM);
12105 return;
12106 }
12107
12108 tg3_nvram_get_pagesize(tp, nvcfg1);
12109 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12110 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12111 }
12112
12113 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12114 {
12115 u32 nvcfg1, nvmpinstrp;
12116
12117 nvcfg1 = tr32(NVRAM_CFG1);
12118 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12119
12120 switch (nvmpinstrp) {
12121 case FLASH_5720_EEPROM_HD:
12122 case FLASH_5720_EEPROM_LD:
12123 tp->nvram_jedecnum = JEDEC_ATMEL;
12124 tg3_flag_set(tp, NVRAM_BUFFERED);
12125
12126 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12127 tw32(NVRAM_CFG1, nvcfg1);
12128 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12129 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12130 else
12131 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12132 return;
12133 case FLASH_5720VENDOR_M_ATMEL_DB011D:
12134 case FLASH_5720VENDOR_A_ATMEL_DB011B:
12135 case FLASH_5720VENDOR_A_ATMEL_DB011D:
12136 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12137 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12138 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12139 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12140 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12141 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12142 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12143 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12144 case FLASH_5720VENDOR_ATMEL_45USPT:
12145 tp->nvram_jedecnum = JEDEC_ATMEL;
12146 tg3_flag_set(tp, NVRAM_BUFFERED);
12147 tg3_flag_set(tp, FLASH);
12148
12149 switch (nvmpinstrp) {
12150 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12151 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12152 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12153 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12154 break;
12155 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12156 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12157 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12158 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12159 break;
12160 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12161 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12162 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12163 break;
12164 default:
12165 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12166 break;
12167 }
12168 break;
12169 case FLASH_5720VENDOR_M_ST_M25PE10:
12170 case FLASH_5720VENDOR_M_ST_M45PE10:
12171 case FLASH_5720VENDOR_A_ST_M25PE10:
12172 case FLASH_5720VENDOR_A_ST_M45PE10:
12173 case FLASH_5720VENDOR_M_ST_M25PE20:
12174 case FLASH_5720VENDOR_M_ST_M45PE20:
12175 case FLASH_5720VENDOR_A_ST_M25PE20:
12176 case FLASH_5720VENDOR_A_ST_M45PE20:
12177 case FLASH_5720VENDOR_M_ST_M25PE40:
12178 case FLASH_5720VENDOR_M_ST_M45PE40:
12179 case FLASH_5720VENDOR_A_ST_M25PE40:
12180 case FLASH_5720VENDOR_A_ST_M45PE40:
12181 case FLASH_5720VENDOR_M_ST_M25PE80:
12182 case FLASH_5720VENDOR_M_ST_M45PE80:
12183 case FLASH_5720VENDOR_A_ST_M25PE80:
12184 case FLASH_5720VENDOR_A_ST_M45PE80:
12185 case FLASH_5720VENDOR_ST_25USPT:
12186 case FLASH_5720VENDOR_ST_45USPT:
12187 tp->nvram_jedecnum = JEDEC_ST;
12188 tg3_flag_set(tp, NVRAM_BUFFERED);
12189 tg3_flag_set(tp, FLASH);
12190
12191 switch (nvmpinstrp) {
12192 case FLASH_5720VENDOR_M_ST_M25PE20:
12193 case FLASH_5720VENDOR_M_ST_M45PE20:
12194 case FLASH_5720VENDOR_A_ST_M25PE20:
12195 case FLASH_5720VENDOR_A_ST_M45PE20:
12196 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12197 break;
12198 case FLASH_5720VENDOR_M_ST_M25PE40:
12199 case FLASH_5720VENDOR_M_ST_M45PE40:
12200 case FLASH_5720VENDOR_A_ST_M25PE40:
12201 case FLASH_5720VENDOR_A_ST_M45PE40:
12202 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12203 break;
12204 case FLASH_5720VENDOR_M_ST_M25PE80:
12205 case FLASH_5720VENDOR_M_ST_M45PE80:
12206 case FLASH_5720VENDOR_A_ST_M25PE80:
12207 case FLASH_5720VENDOR_A_ST_M45PE80:
12208 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12209 break;
12210 default:
12211 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12212 break;
12213 }
12214 break;
12215 default:
12216 tg3_flag_set(tp, NO_NVRAM);
12217 return;
12218 }
12219
12220 tg3_nvram_get_pagesize(tp, nvcfg1);
12221 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12222 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12223 }
12224
12225 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12226 static void __devinit tg3_nvram_init(struct tg3 *tp)
12227 {
12228 tw32_f(GRC_EEPROM_ADDR,
12229 (EEPROM_ADDR_FSM_RESET |
12230 (EEPROM_DEFAULT_CLOCK_PERIOD <<
12231 EEPROM_ADDR_CLKPERD_SHIFT)));
12232
12233 msleep(1);
12234
12235 /* Enable seeprom accesses. */
12236 tw32_f(GRC_LOCAL_CTRL,
12237 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12238 udelay(100);
12239
12240 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12241 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12242 tg3_flag_set(tp, NVRAM);
12243
12244 if (tg3_nvram_lock(tp)) {
12245 netdev_warn(tp->dev,
12246 "Cannot get nvram lock, %s failed\n",
12247 __func__);
12248 return;
12249 }
12250 tg3_enable_nvram_access(tp);
12251
12252 tp->nvram_size = 0;
12253
12254 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12255 tg3_get_5752_nvram_info(tp);
12256 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12257 tg3_get_5755_nvram_info(tp);
12258 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12259 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12260 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12261 tg3_get_5787_nvram_info(tp);
12262 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12263 tg3_get_5761_nvram_info(tp);
12264 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12265 tg3_get_5906_nvram_info(tp);
12266 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12267 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12268 tg3_get_57780_nvram_info(tp);
12269 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12270 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12271 tg3_get_5717_nvram_info(tp);
12272 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12273 tg3_get_5720_nvram_info(tp);
12274 else
12275 tg3_get_nvram_info(tp);
12276
12277 if (tp->nvram_size == 0)
12278 tg3_get_nvram_size(tp);
12279
12280 tg3_disable_nvram_access(tp);
12281 tg3_nvram_unlock(tp);
12282
12283 } else {
12284 tg3_flag_clear(tp, NVRAM);
12285 tg3_flag_clear(tp, NVRAM_BUFFERED);
12286
12287 tg3_get_eeprom_size(tp);
12288 }
12289 }
12290
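/* Write a block through the legacy SEEPROM interface, one 32-bit word
 * at a time, polling for completion of each word.
 */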
12291 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12292 u32 offset, u32 len, u8 *buf)
12293 {
12294 int i, j, rc = 0;
12295 u32 val;
12296
12297 for (i = 0; i < len; i += 4) {
12298 u32 addr;
12299 __be32 data;
12300
12301 addr = offset + i;
12302
12303 memcpy(&data, buf + i, 4);
12304
12305 /*
12306 * The SEEPROM interface expects the data to always be opposite
12307 * the native endian format. We accomplish this by reversing
12308 * all the operations that would have been performed on the
12309 * data from a call to tg3_nvram_read_be32().
12310 */
12311 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12312
12313 val = tr32(GRC_EEPROM_ADDR);
12314 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12315
12316 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12317 EEPROM_ADDR_READ);
12318 tw32(GRC_EEPROM_ADDR, val |
12319 (0 << EEPROM_ADDR_DEVID_SHIFT) |
12320 (addr & EEPROM_ADDR_ADDR_MASK) |
12321 EEPROM_ADDR_START |
12322 EEPROM_ADDR_WRITE);
12323
12324 for (j = 0; j < 1000; j++) {
12325 val = tr32(GRC_EEPROM_ADDR);
12326
12327 if (val & EEPROM_ADDR_COMPLETE)
12328 break;
12329 msleep(1);
12330 }
12331 if (!(val & EEPROM_ADDR_COMPLETE)) {
12332 rc = -EBUSY;
12333 break;
12334 }
12335 }
12336
12337 return rc;
12338 }
12339
12340 /* offset and length are dword aligned */
12341 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12342 u8 *buf)
12343 {
12344 int ret = 0;
12345 u32 pagesize = tp->nvram_pagesize;
12346 u32 pagemask = pagesize - 1;
12347 u32 nvram_cmd;
12348 u8 *tmp;
12349
12350 tmp = kmalloc(pagesize, GFP_KERNEL);
12351 if (tmp == NULL)
12352 return -ENOMEM;
12353
12354 while (len) {
12355 int j;
12356 u32 phy_addr, page_off, size;
12357
12358 phy_addr = offset & ~pagemask;
12359
12360 for (j = 0; j < pagesize; j += 4) {
12361 ret = tg3_nvram_read_be32(tp, phy_addr + j,
12362 (__be32 *) (tmp + j));
12363 if (ret)
12364 break;
12365 }
12366 if (ret)
12367 break;
12368
12369 page_off = offset & pagemask;
12370 size = pagesize;
12371 if (len < size)
12372 size = len;
12373
12374 len -= size;
12375
12376 memcpy(tmp + page_off, buf, size);
12377
12378 offset = offset + (pagesize - page_off);
12379
12380 tg3_enable_nvram_access(tp);
12381
12382 /*
12383 * Before we can erase the flash page, we need
12384 * to issue a special "write enable" command.
12385 */
12386 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12387
12388 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12389 break;
12390
12391 /* Erase the target page */
12392 tw32(NVRAM_ADDR, phy_addr);
12393
12394 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12395 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12396
12397 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12398 break;
12399
12400 /* Issue another write enable to start the write. */
12401 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12402
12403 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12404 break;
12405
12406 for (j = 0; j < pagesize; j += 4) {
12407 __be32 data;
12408
12409 data = *((__be32 *) (tmp + j));
12410
12411 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12412
12413 tw32(NVRAM_ADDR, phy_addr + j);
12414
12415 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12416 NVRAM_CMD_WR;
12417
12418 if (j == 0)
12419 nvram_cmd |= NVRAM_CMD_FIRST;
12420 else if (j == (pagesize - 4))
12421 nvram_cmd |= NVRAM_CMD_LAST;
12422
12423 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12424 break;
12425 }
12426 if (ret)
12427 break;
12428 }
12429
12430 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12431 tg3_nvram_exec_cmd(tp, nvram_cmd);
12432
12433 kfree(tmp);
12434
12435 return ret;
12436 }
12437
12438 /* offset and length are dword aligned */
12439 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12440 u8 *buf)
12441 {
12442 int i, ret = 0;
12443
12444 for (i = 0; i < len; i += 4, offset += 4) {
12445 u32 page_off, phy_addr, nvram_cmd;
12446 __be32 data;
12447
12448 memcpy(&data, buf + i, 4);
12449 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12450
12451 page_off = offset % tp->nvram_pagesize;
12452
12453 phy_addr = tg3_nvram_phys_addr(tp, offset);
12454
12455 tw32(NVRAM_ADDR, phy_addr);
12456
12457 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12458
12459 if (page_off == 0 || i == 0)
12460 nvram_cmd |= NVRAM_CMD_FIRST;
12461 if (page_off == (tp->nvram_pagesize - 4))
12462 nvram_cmd |= NVRAM_CMD_LAST;
12463
12464 if (i == (len - 4))
12465 nvram_cmd |= NVRAM_CMD_LAST;
12466
12467 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12468 !tg3_flag(tp, 5755_PLUS) &&
12469 (tp->nvram_jedecnum == JEDEC_ST) &&
12470 (nvram_cmd & NVRAM_CMD_FIRST)) {
12471
12472			ret = tg3_nvram_exec_cmd(tp,
12473						 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12474						 NVRAM_CMD_DONE);
12475			if (ret)
12476				break;
12477 }
12478 if (!tg3_flag(tp, FLASH)) {
12479 /* We always do complete word writes to eeprom. */
12480 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12481 }
12482
12483 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12484 break;
12485 }
12486 return ret;
12487 }
12488
12489 /* offset and length are dword aligned */
12490 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12491 {
12492 int ret;
12493
12494 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12495 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12496 ~GRC_LCLCTRL_GPIO_OUTPUT1);
12497 udelay(40);
12498 }
12499
12500 if (!tg3_flag(tp, NVRAM)) {
12501 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12502 } else {
12503 u32 grc_mode;
12504
12505 ret = tg3_nvram_lock(tp);
12506 if (ret)
12507 return ret;
12508
12509 tg3_enable_nvram_access(tp);
12510 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12511 tw32(NVRAM_WRITE1, 0x406);
12512
12513 grc_mode = tr32(GRC_MODE);
12514 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12515
12516 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12517 ret = tg3_nvram_write_block_buffered(tp, offset, len,
12518 buf);
12519 } else {
12520 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12521 buf);
12522 }
12523
12524 grc_mode = tr32(GRC_MODE);
12525 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12526
12527 tg3_disable_nvram_access(tp);
12528 tg3_nvram_unlock(tp);
12529 }
12530
12531 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12532 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12533 udelay(40);
12534 }
12535
12536 return ret;
12537 }
12538
12539 struct subsys_tbl_ent {
12540 u16 subsys_vendor, subsys_devid;
12541 u32 phy_id;
12542 };
12543
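/* Fallback PHY ID table, indexed by PCI subsystem IDs, for boards
 * whose NVRAM does not provide a usable PHY ID (see tg3_phy_probe()).
 */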
12544 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12545 /* Broadcom boards. */
12546 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12547 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12548 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12549 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12550 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12551 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12552 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12553 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12554 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12555 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12556 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12557 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12558 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12559 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12560 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12561 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12562 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12563 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12564 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12565 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12566 { TG3PCI_SUBVENDOR_ID_BROADCOM,
12567 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12568
12569 /* 3com boards. */
12570 { TG3PCI_SUBVENDOR_ID_3COM,
12571 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12572 { TG3PCI_SUBVENDOR_ID_3COM,
12573 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12574 { TG3PCI_SUBVENDOR_ID_3COM,
12575 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12576 { TG3PCI_SUBVENDOR_ID_3COM,
12577 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12578 { TG3PCI_SUBVENDOR_ID_3COM,
12579 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12580
12581 /* DELL boards. */
12582 { TG3PCI_SUBVENDOR_ID_DELL,
12583 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12584 { TG3PCI_SUBVENDOR_ID_DELL,
12585 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12586 { TG3PCI_SUBVENDOR_ID_DELL,
12587 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12588 { TG3PCI_SUBVENDOR_ID_DELL,
12589 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12590
12591 /* Compaq boards. */
12592 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12593 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12594 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12595 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12596 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12597 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12598 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12599 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12600 { TG3PCI_SUBVENDOR_ID_COMPAQ,
12601 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12602
12603 /* IBM boards. */
12604 { TG3PCI_SUBVENDOR_ID_IBM,
12605 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12606 };
12607
12608 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12609 {
12610 int i;
12611
12612 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12613 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12614 tp->pdev->subsystem_vendor) &&
12615 (subsys_id_to_phy_id[i].subsys_devid ==
12616 tp->pdev->subsystem_device))
12617 return &subsys_id_to_phy_id[i];
12618 }
12619 return NULL;
12620 }
12621
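/* Read the hardware configuration that the bootcode left in NIC SRAM
 * (LED mode, PHY ID, WOL and ASF/APE capabilities) and set the
 * corresponding driver flags.
 */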
12622 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12623 {
12624 u32 val;
12625 u16 pmcsr;
12626
12627 /* On some early chips the SRAM cannot be accessed in D3hot state,
12628	 * so we need to make sure we're in D0.
12629 */
12630 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
12631 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
12632 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
12633 msleep(1);
12634
12635 /* Make sure register accesses (indirect or otherwise)
12636 * will function correctly.
12637 */
12638 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12639 tp->misc_host_ctrl);
12640
12641 /* The memory arbiter has to be enabled in order for SRAM accesses
12642 * to succeed. Normally on powerup the tg3 chip firmware will make
12643 * sure it is enabled, but other entities such as system netboot
12644 * code might disable it.
12645 */
12646 val = tr32(MEMARB_MODE);
12647 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
12648
12649 tp->phy_id = TG3_PHY_ID_INVALID;
12650 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12651
12652	/* Assume an onboard device and WOL capability by default. */
12653 tg3_flag_set(tp, EEPROM_WRITE_PROT);
12654 tg3_flag_set(tp, WOL_CAP);
12655
12656 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12657 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12658 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12659 tg3_flag_set(tp, IS_NIC);
12660 }
12661 val = tr32(VCPU_CFGSHDW);
12662 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12663 tg3_flag_set(tp, ASPM_WORKAROUND);
12664 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12665 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
12666 tg3_flag_set(tp, WOL_ENABLE);
12667 device_set_wakeup_enable(&tp->pdev->dev, true);
12668 }
12669 goto done;
12670 }
12671
12672 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12673 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12674 u32 nic_cfg, led_cfg;
12675 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12676 int eeprom_phy_serdes = 0;
12677
12678 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12679 tp->nic_sram_data_cfg = nic_cfg;
12680
12681 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12682 ver >>= NIC_SRAM_DATA_VER_SHIFT;
12683 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12684 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12685 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
12686 (ver > 0) && (ver < 0x100))
12687 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12688
12689 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12690 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12691
12692 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12693 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12694 eeprom_phy_serdes = 1;
12695
12696 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
12697 if (nic_phy_id != 0) {
12698 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12699 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
12700
12701 eeprom_phy_id = (id1 >> 16) << 10;
12702 eeprom_phy_id |= (id2 & 0xfc00) << 16;
12703 eeprom_phy_id |= (id2 & 0x03ff) << 0;
12704 } else
12705 eeprom_phy_id = 0;
12706
12707 tp->phy_id = eeprom_phy_id;
12708 if (eeprom_phy_serdes) {
12709 if (!tg3_flag(tp, 5705_PLUS))
12710 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12711 else
12712 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
12713 }
12714
12715 if (tg3_flag(tp, 5750_PLUS))
12716 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12717 SHASTA_EXT_LED_MODE_MASK);
12718 else
12719 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12720
12721 switch (led_cfg) {
12722 default:
12723 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12724 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12725 break;
12726
12727 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12728 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12729 break;
12730
12731 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12732 tp->led_ctrl = LED_CTRL_MODE_MAC;
12733
12734 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
12735			 * read back from some older 5700/5701 bootcode.
12736 */
12737 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12738 ASIC_REV_5700 ||
12739 GET_ASIC_REV(tp->pci_chip_rev_id) ==
12740 ASIC_REV_5701)
12741 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12742
12743 break;
12744
12745 case SHASTA_EXT_LED_SHARED:
12746 tp->led_ctrl = LED_CTRL_MODE_SHARED;
12747 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12748 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12749 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12750 LED_CTRL_MODE_PHY_2);
12751 break;
12752
12753 case SHASTA_EXT_LED_MAC:
12754 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12755 break;
12756
12757 case SHASTA_EXT_LED_COMBO:
12758 tp->led_ctrl = LED_CTRL_MODE_COMBO;
12759 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12760 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12761 LED_CTRL_MODE_PHY_2);
12762 break;
12763
12764 }
12765
12766 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12767 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12768 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12769 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12770
12771 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
12772 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12773
12774 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
12775 tg3_flag_set(tp, EEPROM_WRITE_PROT);
12776 if ((tp->pdev->subsystem_vendor ==
12777 PCI_VENDOR_ID_ARIMA) &&
12778 (tp->pdev->subsystem_device == 0x205a ||
12779 tp->pdev->subsystem_device == 0x2063))
12780 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12781 } else {
12782 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12783 tg3_flag_set(tp, IS_NIC);
12784 }
12785
12786 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
12787 tg3_flag_set(tp, ENABLE_ASF);
12788 if (tg3_flag(tp, 5750_PLUS))
12789 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
12790 }
12791
12792 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
12793 tg3_flag(tp, 5750_PLUS))
12794 tg3_flag_set(tp, ENABLE_APE);
12795
12796 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
12797 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12798 tg3_flag_clear(tp, WOL_CAP);
12799
12800 if (tg3_flag(tp, WOL_CAP) &&
12801 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
12802 tg3_flag_set(tp, WOL_ENABLE);
12803 device_set_wakeup_enable(&tp->pdev->dev, true);
12804 }
12805
12806 if (cfg2 & (1 << 17))
12807 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
12808
12809			/* serdes signal pre-emphasis in register 0x590 is set
12810			 * by the bootcode if bit 18 is set */
12811 if (cfg2 & (1 << 18))
12812 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
12813
12814 if ((tg3_flag(tp, 57765_PLUS) ||
12815 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12816 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
12817 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12818 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
12819
12820 if (tg3_flag(tp, PCI_EXPRESS) &&
12821 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12822 !tg3_flag(tp, 57765_PLUS)) {
12823 u32 cfg3;
12824
12825 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
12826 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
12827 tg3_flag_set(tp, ASPM_WORKAROUND);
12828 }
12829
12830 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
12831 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
12832 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
12833 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
12834 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
12835 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
12836 }
12837 done:
12838 if (tg3_flag(tp, WOL_CAP))
12839 device_set_wakeup_enable(&tp->pdev->dev,
12840 tg3_flag(tp, WOL_ENABLE));
12841 else
12842 device_set_wakeup_capable(&tp->pdev->dev, false);
12843 }
12844
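/* Issue a command to the OTP controller and poll for completion. */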
12845 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
12846 {
12847 int i;
12848 u32 val;
12849
12850 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
12851 tw32(OTP_CTRL, cmd);
12852
12853 /* Wait for up to 1 ms for command to execute. */
12854 for (i = 0; i < 100; i++) {
12855 val = tr32(OTP_STATUS);
12856 if (val & OTP_STATUS_CMD_DONE)
12857 break;
12858 udelay(10);
12859 }
12860
12861 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
12862 }
12863
12864 /* Read the gphy configuration from the OTP region of the chip. The gphy
12865 * configuration is a 32-bit value that straddles the alignment boundary.
12866 * We do two 32-bit reads and then shift and merge the results.
12867 */
12868 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
12869 {
12870 u32 bhalf_otp, thalf_otp;
12871
12872 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
12873
12874 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
12875 return 0;
12876
12877 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
12878
12879 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12880 return 0;
12881
12882 thalf_otp = tr32(OTP_READ_DATA);
12883
12884 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
12885
12886 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12887 return 0;
12888
12889 bhalf_otp = tr32(OTP_READ_DATA);
12890
12891 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
12892 }
12893
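/* Set the default link configuration: autoneg enabled, advertising
 * every speed/duplex combination the PHY type supports.
 */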
12894 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
12895 {
12896 u32 adv = ADVERTISED_Autoneg |
12897 ADVERTISED_Pause;
12898
12899 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12900 adv |= ADVERTISED_1000baseT_Half |
12901 ADVERTISED_1000baseT_Full;
12902
12903 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12904 adv |= ADVERTISED_100baseT_Half |
12905 ADVERTISED_100baseT_Full |
12906 ADVERTISED_10baseT_Half |
12907 ADVERTISED_10baseT_Full |
12908 ADVERTISED_TP;
12909 else
12910 adv |= ADVERTISED_FIBRE;
12911
12912 tp->link_config.advertising = adv;
12913 tp->link_config.speed = SPEED_INVALID;
12914 tp->link_config.duplex = DUPLEX_INVALID;
12915 tp->link_config.autoneg = AUTONEG_ENABLE;
12916 tp->link_config.active_speed = SPEED_INVALID;
12917 tp->link_config.active_duplex = DUPLEX_INVALID;
12918 tp->link_config.orig_speed = SPEED_INVALID;
12919 tp->link_config.orig_duplex = DUPLEX_INVALID;
12920 tp->link_config.orig_autoneg = AUTONEG_INVALID;
12921 }
12922
12923 static int __devinit tg3_phy_probe(struct tg3 *tp)
12924 {
12925 u32 hw_phy_id_1, hw_phy_id_2;
12926 u32 hw_phy_id, hw_phy_id_masked;
12927 int err;
12928
12929 /* flow control autonegotiation is default behavior */
12930 tg3_flag_set(tp, PAUSE_AUTONEG);
12931 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
12932
12933 if (tg3_flag(tp, USE_PHYLIB))
12934 return tg3_phy_init(tp);
12935
12936 /* Reading the PHY ID register can conflict with ASF
12937 * firmware access to the PHY hardware.
12938 */
12939 err = 0;
12940 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
12941 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
12942 } else {
12943 /* Now read the physical PHY_ID from the chip and verify
12944 * that it is sane. If it doesn't look good, we fall back
12945		 * to the PHY_ID found in the eeprom area, and failing
12946		 * that, the hard-coded subsystem device table.
12947 */
12948 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
12949 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
12950
12951 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
12952 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
12953 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
12954
12955 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
12956 }
12957
12958 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
12959 tp->phy_id = hw_phy_id;
12960 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
12961 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12962 else
12963 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
12964 } else {
12965 if (tp->phy_id != TG3_PHY_ID_INVALID) {
12966 /* Do nothing, phy ID already set up in
12967 * tg3_get_eeprom_hw_cfg().
12968 */
12969 } else {
12970 struct subsys_tbl_ent *p;
12971
12972 /* No eeprom signature? Try the hardcoded
12973 * subsys device table.
12974 */
12975 p = tg3_lookup_by_subsys(tp);
12976 if (!p)
12977 return -ENODEV;
12978
12979 tp->phy_id = p->phy_id;
12980 if (!tp->phy_id ||
12981 tp->phy_id == TG3_PHY_ID_BCM8002)
12982 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12983 }
12984 }
12985
12986 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12987 ((tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
12988 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
12989 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12990 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
12991 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
12992
12993 tg3_phy_init_link_config(tp);
12994
12995 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12996 !tg3_flag(tp, ENABLE_APE) &&
12997 !tg3_flag(tp, ENABLE_ASF)) {
12998 u32 bmsr, mask;
12999
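		/* BMSR latches link-down events; read it twice to get the
		 * current link state.
		 */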
13000 tg3_readphy(tp, MII_BMSR, &bmsr);
13001 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13002 (bmsr & BMSR_LSTATUS))
13003 goto skip_phy_reset;
13004
13005 err = tg3_phy_reset(tp);
13006 if (err)
13007 return err;
13008
13009 tg3_phy_set_wirespeed(tp);
13010
13011 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13012 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13013 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
13014 if (!tg3_copper_is_advertising_all(tp, mask)) {
13015 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13016 tp->link_config.flowctrl);
13017
13018 tg3_writephy(tp, MII_BMCR,
13019 BMCR_ANENABLE | BMCR_ANRESTART);
13020 }
13021 }
13022
13023 skip_phy_reset:
13024 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13025 err = tg3_init_5401phy_dsp(tp);
13026 if (err)
13027 return err;
13028
13029 err = tg3_init_5401phy_dsp(tp);
13030 }
13031
13032 return err;
13033 }
13034
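/* Parse the read-only VPD section for the board part number and, on
 * boards with manufacturer ID "1028" (Dell), the bootcode version
 * string.  Falls back to per-device defaults when no VPD is present.
 */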
13035 static void __devinit tg3_read_vpd(struct tg3 *tp)
13036 {
13037 u8 *vpd_data;
13038 unsigned int block_end, rosize, len;
13039 int j, i = 0;
13040
13041 vpd_data = (u8 *)tg3_vpd_readblock(tp);
13042 if (!vpd_data)
13043 goto out_no_vpd;
13044
13045 i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
13046 PCI_VPD_LRDT_RO_DATA);
13047 if (i < 0)
13048 goto out_not_found;
13049
13050 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13051 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13052 i += PCI_VPD_LRDT_TAG_SIZE;
13053
13054 if (block_end > TG3_NVM_VPD_LEN)
13055 goto out_not_found;
13056
13057 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13058 PCI_VPD_RO_KEYWORD_MFR_ID);
13059 if (j > 0) {
13060 len = pci_vpd_info_field_size(&vpd_data[j]);
13061
13062 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13063 if (j + len > block_end || len != 4 ||
13064 memcmp(&vpd_data[j], "1028", 4))
13065 goto partno;
13066
13067 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13068 PCI_VPD_RO_KEYWORD_VENDOR0);
13069 if (j < 0)
13070 goto partno;
13071
13072 len = pci_vpd_info_field_size(&vpd_data[j]);
13073
13074 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13075 if (j + len > block_end)
13076 goto partno;
13077
13078 memcpy(tp->fw_ver, &vpd_data[j], len);
13079 strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
13080 }
13081
13082 partno:
13083 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13084 PCI_VPD_RO_KEYWORD_PARTNO);
13085 if (i < 0)
13086 goto out_not_found;
13087
13088 len = pci_vpd_info_field_size(&vpd_data[i]);
13089
13090 i += PCI_VPD_INFO_FLD_HDR_SIZE;
13091 if (len > TG3_BPN_SIZE ||
13092 (len + i) > TG3_NVM_VPD_LEN)
13093 goto out_not_found;
13094
13095 memcpy(tp->board_part_number, &vpd_data[i], len);
13096
13097 out_not_found:
13098 kfree(vpd_data);
13099 if (tp->board_part_number[0])
13100 return;
13101
13102 out_no_vpd:
13103 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13104 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13105 strcpy(tp->board_part_number, "BCM5717");
13106 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13107 strcpy(tp->board_part_number, "BCM5718");
13108 else
13109 goto nomatch;
13110 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13111 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13112 strcpy(tp->board_part_number, "BCM57780");
13113 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13114 strcpy(tp->board_part_number, "BCM57760");
13115 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13116 strcpy(tp->board_part_number, "BCM57790");
13117 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13118 strcpy(tp->board_part_number, "BCM57788");
13119 else
13120 goto nomatch;
13121 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13122 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13123 strcpy(tp->board_part_number, "BCM57761");
13124 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13125 strcpy(tp->board_part_number, "BCM57765");
13126 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13127 strcpy(tp->board_part_number, "BCM57781");
13128 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13129 strcpy(tp->board_part_number, "BCM57785");
13130 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13131 strcpy(tp->board_part_number, "BCM57791");
13132 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13133 strcpy(tp->board_part_number, "BCM57795");
13134 else
13135 goto nomatch;
13136 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13137 strcpy(tp->board_part_number, "BCM95906");
13138 } else {
13139 nomatch:
13140 strcpy(tp->board_part_number, "none");
13141 }
13142 }
13143
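/* A firmware image is considered valid if its first word carries the
 * 0x0c000000 signature bits and its second word is zero.
 */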
13144 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13145 {
13146 u32 val;
13147
13148 if (tg3_nvram_read(tp, offset, &val) ||
13149 (val & 0xfc000000) != 0x0c000000 ||
13150 tg3_nvram_read(tp, offset + 4, &val) ||
13151 val != 0)
13152 return 0;
13153
13154 return 1;
13155 }
13156
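/* Append the bootcode version to tp->fw_ver.  Newer images carry a
 * 16-byte version string; older ones encode a major.minor pair.
 */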
13157 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13158 {
13159 u32 val, offset, start, ver_offset;
13160 int i, dst_off;
13161 bool newver = false;
13162
13163 if (tg3_nvram_read(tp, 0xc, &offset) ||
13164 tg3_nvram_read(tp, 0x4, &start))
13165 return;
13166
13167 offset = tg3_nvram_logical_addr(tp, offset);
13168
13169 if (tg3_nvram_read(tp, offset, &val))
13170 return;
13171
13172 if ((val & 0xfc000000) == 0x0c000000) {
13173 if (tg3_nvram_read(tp, offset + 4, &val))
13174 return;
13175
13176 if (val == 0)
13177 newver = true;
13178 }
13179
13180 dst_off = strlen(tp->fw_ver);
13181
13182 if (newver) {
13183 if (TG3_VER_SIZE - dst_off < 16 ||
13184 tg3_nvram_read(tp, offset + 8, &ver_offset))
13185 return;
13186
13187 offset = offset + ver_offset - start;
13188 for (i = 0; i < 16; i += 4) {
13189 __be32 v;
13190 if (tg3_nvram_read_be32(tp, offset + i, &v))
13191 return;
13192
13193 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13194 }
13195 } else {
13196 u32 major, minor;
13197
13198 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13199 return;
13200
13201 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13202 TG3_NVM_BCVER_MAJSFT;
13203 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13204 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13205 "v%d.%02d", major, minor);
13206 }
13207 }
13208
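/* Report the hardware selfboot version encoded in the CFG1 word. */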
13209 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13210 {
13211 u32 val, major, minor;
13212
13213 /* Use native endian representation */
13214 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13215 return;
13216
13217 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13218 TG3_NVM_HWSB_CFG1_MAJSFT;
13219 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13220 TG3_NVM_HWSB_CFG1_MINSFT;
13221
13222	snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
13223 }
13224
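/* Append the selfboot format-1 version: major.minor plus an optional
 * build letter.
 */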
13225 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13226 {
13227 u32 offset, major, minor, build;
13228
13229 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13230
13231 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13232 return;
13233
13234 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13235 case TG3_EEPROM_SB_REVISION_0:
13236 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13237 break;
13238 case TG3_EEPROM_SB_REVISION_2:
13239 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13240 break;
13241 case TG3_EEPROM_SB_REVISION_3:
13242 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13243 break;
13244 case TG3_EEPROM_SB_REVISION_4:
13245 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13246 break;
13247 case TG3_EEPROM_SB_REVISION_5:
13248 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13249 break;
13250 case TG3_EEPROM_SB_REVISION_6:
13251 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13252 break;
13253 default:
13254 return;
13255 }
13256
13257 if (tg3_nvram_read(tp, offset, &val))
13258 return;
13259
13260 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13261 TG3_EEPROM_SB_EDH_BLD_SHFT;
13262 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13263 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13264 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
13265
13266 if (minor > 99 || build > 26)
13267 return;
13268
13269 offset = strlen(tp->fw_ver);
13270 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13271 " v%d.%02d", major, minor);
13272
13273 if (build > 0) {
13274 offset = strlen(tp->fw_ver);
13275 if (offset < TG3_VER_SIZE - 1)
13276 tp->fw_ver[offset] = 'a' + build - 1;
13277 }
13278 }
13279
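/* Walk the NVRAM directory for the ASF management firmware image and
 * append its version string.
 */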
13280 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13281 {
13282 u32 val, offset, start;
13283 int i, vlen;
13284
13285 for (offset = TG3_NVM_DIR_START;
13286 offset < TG3_NVM_DIR_END;
13287 offset += TG3_NVM_DIRENT_SIZE) {
13288 if (tg3_nvram_read(tp, offset, &val))
13289 return;
13290
13291 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13292 break;
13293 }
13294
13295 if (offset == TG3_NVM_DIR_END)
13296 return;
13297
13298 if (!tg3_flag(tp, 5705_PLUS))
13299 start = 0x08000000;
13300 else if (tg3_nvram_read(tp, offset - 4, &start))
13301 return;
13302
13303 if (tg3_nvram_read(tp, offset + 4, &offset) ||
13304 !tg3_fw_img_is_valid(tp, offset) ||
13305 tg3_nvram_read(tp, offset + 8, &val))
13306 return;
13307
13308 offset += val - start;
13309
13310 vlen = strlen(tp->fw_ver);
13311
13312 tp->fw_ver[vlen++] = ',';
13313 tp->fw_ver[vlen++] = ' ';
13314
13315 for (i = 0; i < 4; i++) {
13316 __be32 v;
13317 if (tg3_nvram_read_be32(tp, offset, &v))
13318 return;
13319
13320 offset += sizeof(v);
13321
13322 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13323 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13324 break;
13325 }
13326
13327 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13328 vlen += sizeof(v);
13329 }
13330 }
13331
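/* Append the APE firmware version (NCSI or DASH flavor), provided the
 * APE firmware is present and ready.
 */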
13332 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13333 {
13334 int vlen;
13335 u32 apedata;
13336 char *fwtype;
13337
13338 if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13339 return;
13340
13341 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13342 if (apedata != APE_SEG_SIG_MAGIC)
13343 return;
13344
13345 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13346 if (!(apedata & APE_FW_STATUS_READY))
13347 return;
13348
13349 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13350
13351 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13352 tg3_flag_set(tp, APE_HAS_NCSI);
13353 fwtype = "NCSI";
13354 } else {
13355 fwtype = "DASH";
13356 }
13357
13358 vlen = strlen(tp->fw_ver);
13359
13360 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13361 fwtype,
13362 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13363 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13364 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13365 (apedata & APE_FW_VERSION_BLDMSK));
13366 }
13367
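/* Assemble tp->fw_ver by dispatching on the NVRAM magic to the
 * appropriate version reader, then optionally appending management
 * firmware information.
 */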
13368 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13369 {
13370 u32 val;
13371 bool vpd_vers = false;
13372
13373 if (tp->fw_ver[0] != 0)
13374 vpd_vers = true;
13375
13376 if (tg3_flag(tp, NO_NVRAM)) {
13377 strcat(tp->fw_ver, "sb");
13378 return;
13379 }
13380
13381 if (tg3_nvram_read(tp, 0, &val))
13382 return;
13383
13384 if (val == TG3_EEPROM_MAGIC)
13385 tg3_read_bc_ver(tp);
13386 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13387 tg3_read_sb_ver(tp, val);
13388 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13389 tg3_read_hwsb_ver(tp);
13390 else
13391 return;
13392
13393 if (!tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || vpd_vers)
13394 goto done;
13395
13396 tg3_read_mgmtfw_ver(tp);
13397
13398 done:
13399 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13400 }
13401
13402 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13403
13404 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13405 {
13406 if (tg3_flag(tp, LRG_PROD_RING_CAP))
13407 return TG3_RX_RET_MAX_SIZE_5717;
13408 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13409 return TG3_RX_RET_MAX_SIZE_5700;
13410 else
13411 return TG3_RX_RET_MAX_SIZE_5705;
13412 }
13413
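/* Host bridge chipsets known to reorder PCI writes; their presence
 * presumably triggers a write-ordering workaround during chip setup.
 */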
13414 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13415 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13416 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13417 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13418 { },
13419 };
13420
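/* Determine the chip revision and establish the numerous chip- and
 * chipset-specific workaround flags before the device is used.
 */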
13421 static int __devinit tg3_get_invariants(struct tg3 *tp)
13422 {
13423 u32 misc_ctrl_reg;
13424 u32 pci_state_reg, grc_misc_cfg;
13425 u32 val;
13426 u16 pci_cmd;
13427 int err;
13428
13429 /* Force memory write invalidate off. If we leave it on,
13430 * then on 5700_BX chips we have to enable a workaround.
13431 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13432	 * to match the cacheline size.  The Broadcom driver has this
13433	 * workaround but turns MWI off all the time and so never uses
13434 * it. This seems to suggest that the workaround is insufficient.
13435 */
13436 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13437 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13438 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13439
13440 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
13441 * has the register indirect write enable bit set before
13442 * we try to access any of the MMIO registers. It is also
13443 * critical that the PCI-X hw workaround situation is decided
13444 * before that as well.
13445 */
13446 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13447 &misc_ctrl_reg);
13448
13449 tp->pci_chip_rev_id = (misc_ctrl_reg >>
13450 MISC_HOST_CTRL_CHIPREV_SHIFT);
13451 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13452 u32 prod_id_asic_rev;
13453
13454 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13455 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13456 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13457 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13458 pci_read_config_dword(tp->pdev,
13459 TG3PCI_GEN2_PRODID_ASICREV,
13460 &prod_id_asic_rev);
13461 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13462 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13463 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13464 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13465 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13466 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13467 pci_read_config_dword(tp->pdev,
13468 TG3PCI_GEN15_PRODID_ASICREV,
13469 &prod_id_asic_rev);
13470 else
13471 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13472 &prod_id_asic_rev);
13473
13474 tp->pci_chip_rev_id = prod_id_asic_rev;
13475 }
13476
13477 /* Wrong chip ID in 5752 A0. This code can be removed later
13478 * as A0 is not in production.
13479 */
13480 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13481 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13482
13483 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13484 * we need to disable memory and use config. cycles
13485 * only to access all registers. The 5702/03 chips
13486 * can mistakenly decode the special cycles from the
13487 * ICH chipsets as memory write cycles, causing corruption
13488 * of register and memory space. Only certain ICH bridges
13489 * will drive special cycles with non-zero data during the
13490 * address phase which can fall within the 5703's address
13491 * range. This is not an ICH bug as the PCI spec allows
13492 * non-zero address during special cycles. However, only
13493 * these ICH bridges are known to drive non-zero addresses
13494 * during special cycles.
13495 *
13496 * Since special cycles do not cross PCI bridges, we only
13497 * enable this workaround if the 5703 is on the secondary
13498 * bus of these ICH bridges.
13499 */
13500 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13501 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13502 static struct tg3_dev_id {
13503 u32 vendor;
13504 u32 device;
13505 u32 rev;
13506 } ich_chipsets[] = {
13507 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13508 PCI_ANY_ID },
13509 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13510 PCI_ANY_ID },
13511 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13512 0xa },
13513 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13514 PCI_ANY_ID },
13515 { },
13516 };
13517 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13518 struct pci_dev *bridge = NULL;
13519
13520 while (pci_id->vendor != 0) {
13521 bridge = pci_get_device(pci_id->vendor, pci_id->device,
13522 bridge);
13523 if (!bridge) {
13524 pci_id++;
13525 continue;
13526 }
13527 if (pci_id->rev != PCI_ANY_ID) {
13528 if (bridge->revision > pci_id->rev)
13529 continue;
13530 }
13531 if (bridge->subordinate &&
13532 (bridge->subordinate->number ==
13533 tp->pdev->bus->number)) {
13534 tg3_flag_set(tp, ICH_WORKAROUND);
13535 pci_dev_put(bridge);
13536 break;
13537 }
13538 }
13539 }
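/* Note: pci_get_device() continues the search from its third argument,
 * so the loop above visits every matching ICH bridge in the system; the
 * final reference is dropped with pci_dev_put() once our device is
 * found on that bridge's secondary bus.
 */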
13540
13541 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13542 static struct tg3_dev_id {
13543 u32 vendor;
13544 u32 device;
13545 } bridge_chipsets[] = {
13546 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13547 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13548 { },
13549 };
13550 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13551 struct pci_dev *bridge = NULL;
13552
13553 while (pci_id->vendor != 0) {
13554 bridge = pci_get_device(pci_id->vendor,
13555 pci_id->device,
13556 bridge);
13557 if (!bridge) {
13558 pci_id++;
13559 continue;
13560 }
13561 if (bridge->subordinate &&
13562 (bridge->subordinate->number <=
13563 tp->pdev->bus->number) &&
13564 (bridge->subordinate->subordinate >=
13565 tp->pdev->bus->number)) {
13566 tg3_flag_set(tp, 5701_DMA_BUG);
13567 pci_dev_put(bridge);
13568 break;
13569 }
13570 }
13571 }
13572
13573 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13574 * DMA addresses > 40-bit. This bridge may have additional
13575 * 57xx devices behind it in some 4-port NIC designs, for example.
13576 * Any tg3 device found behind the bridge will also need the 40-bit
13577 * DMA workaround.
13578 */
13579 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13580 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13581 tg3_flag_set(tp, 5780_CLASS);
13582 tg3_flag_set(tp, 40BIT_DMA_BUG);
13583 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13584 } else {
13585 struct pci_dev *bridge = NULL;
13586
13587 do {
13588 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13589 PCI_DEVICE_ID_SERVERWORKS_EPB,
13590 bridge);
13591 if (bridge && bridge->subordinate &&
13592 (bridge->subordinate->number <=
13593 tp->pdev->bus->number) &&
13594 (bridge->subordinate->subordinate >=
13595 tp->pdev->bus->number)) {
13596 tg3_flag_set(tp, 40BIT_DMA_BUG);
13597 pci_dev_put(bridge);
13598 break;
13599 }
13600 } while (bridge);
13601 }
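/* Note: the "secondary <= our bus <= subordinate" comparison above is
 * the standard PCI bus-range containment test; it holds exactly when
 * tp->pdev sits anywhere below the bridge, even several buses deep,
 * which is why extra tg3 ports behind the EPB also get the 40-bit
 * DMA workaround.
 */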
13602
13603 /* Initialize misc host control in PCI block. */
13604 tp->misc_host_ctrl |= (misc_ctrl_reg &
13605 MISC_HOST_CTRL_CHIPREV);
13606 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13607 tp->misc_host_ctrl);
13608
13609 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13610 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
13611 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13612 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13613 tp->pdev_peer = tg3_find_peer(tp);
13614
13615 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13616 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13617 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13618 tg3_flag_set(tp, 5717_PLUS);
13619
13620 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13621 tg3_flag(tp, 5717_PLUS))
13622 tg3_flag_set(tp, 57765_PLUS);
13623
13624 /* Intentionally exclude ASIC_REV_5906 */
13625 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13626 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13627 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13628 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13629 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13630 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13631 tg3_flag(tp, 57765_PLUS))
13632 tg3_flag_set(tp, 5755_PLUS);
13633
13634 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13635 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13636 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13637 tg3_flag(tp, 5755_PLUS) ||
13638 tg3_flag(tp, 5780_CLASS))
13639 tg3_flag_set(tp, 5750_PLUS);
13640
13641 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13642 tg3_flag(tp, 5750_PLUS))
13643 tg3_flag_set(tp, 5705_PLUS);
13644
13645 /* Determine TSO capabilities */
13646 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13647 ; /* Do nothing. HW bug. */
13648 else if (tg3_flag(tp, 57765_PLUS))
13649 tg3_flag_set(tp, HW_TSO_3);
13650 else if (tg3_flag(tp, 5755_PLUS) ||
13651 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13652 tg3_flag_set(tp, HW_TSO_2);
13653 else if (tg3_flag(tp, 5750_PLUS)) {
13654 tg3_flag_set(tp, HW_TSO_1);
13655 tg3_flag_set(tp, TSO_BUG);
13656 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13657 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13658 tg3_flag_clear(tp, TSO_BUG);
13659 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13660 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13661 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13662 tg3_flag_set(tp, TSO_BUG);
13663 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13664 tp->fw_needed = FIRMWARE_TG3TSO5;
13665 else
13666 tp->fw_needed = FIRMWARE_TG3TSO;
13667 }
13668
13669 /* Selectively allow TSO based on operating conditions */
13670 if (tg3_flag(tp, HW_TSO_1) ||
13671 tg3_flag(tp, HW_TSO_2) ||
13672 tg3_flag(tp, HW_TSO_3) ||
13673 (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
13674 tg3_flag_set(tp, TSO_CAPABLE);
13675 else {
13676 tg3_flag_clear(tp, TSO_CAPABLE);
13677 tg3_flag_clear(tp, TSO_BUG);
13678 tp->fw_needed = NULL;
13679 }
13680
13681 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
13682 tp->fw_needed = FIRMWARE_TG3;
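/* Summary of the TSO selection above: 5719 gets no TSO offload (hw bug),
 * 57765_PLUS parts use HW_TSO_3, 5755_PLUS and 5906 use HW_TSO_2, other
 * 5750_PLUS parts use HW_TSO_1, and older chips fall back to firmware
 * TSO (FIRMWARE_TG3TSO*), which is only advertised when ASF is not
 * active. 5701 A0 additionally needs FIRMWARE_TG3 regardless.
 */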
13683
13684 tp->irq_max = 1;
13685
13686 if (tg3_flag(tp, 5750_PLUS)) {
13687 tg3_flag_set(tp, SUPPORT_MSI);
13688 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13689 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13690 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13691 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13692 tp->pdev_peer == tp->pdev))
13693 tg3_flag_clear(tp, SUPPORT_MSI);
13694
13695 if (tg3_flag(tp, 5755_PLUS) ||
13696 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13697 tg3_flag_set(tp, 1SHOT_MSI);
13698 }
13699
13700 if (tg3_flag(tp, 57765_PLUS)) {
13701 tg3_flag_set(tp, SUPPORT_MSIX);
13702 tp->irq_max = TG3_IRQ_MAX_VECS;
13703 }
13704 }
13705
13706 /* All chips can get confused if TX buffers
13707 * straddle the 4GB address boundary.
13708 */
13709 tg3_flag_set(tp, 4G_DMA_BNDRY_BUG);
13710
13711 if (tg3_flag(tp, 5755_PLUS))
13712 tg3_flag_set(tp, SHORT_DMA_BUG);
13713
13714 if (tg3_flag(tp, 5717_PLUS))
13715 tg3_flag_set(tp, LRG_PROD_RING_CAP);
13716
13717 if (tg3_flag(tp, 57765_PLUS) &&
13718 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
13719 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
13720
13721 if (!tg3_flag(tp, 5705_PLUS) ||
13722 tg3_flag(tp, 5780_CLASS) ||
13723 tg3_flag(tp, USE_JUMBO_BDFLAG))
13724 tg3_flag_set(tp, JUMBO_CAPABLE);
13725
13726 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13727 &pci_state_reg);
13728
13729 tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
13730 if (tp->pcie_cap != 0) {
13731 u16 lnkctl;
13732
13733 tg3_flag_set(tp, PCI_EXPRESS);
13734
13735 tp->pcie_readrq = 4096;
13736 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13737 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13738 tp->pcie_readrq = 2048;
13739
13740 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13741
13742 pci_read_config_word(tp->pdev,
13743 tp->pcie_cap + PCI_EXP_LNKCTL,
13744 &lnkctl);
13745 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13746 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13747 ASIC_REV_5906) {
13748 tg3_flag_clear(tp, HW_TSO_2);
13749 tg3_flag_clear(tp, TSO_CAPABLE);
13750 }
13751 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13752 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13753 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13754 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13755 tg3_flag_set(tp, CLKREQ_BUG);
13756 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13757 tg3_flag_set(tp, L1PLLPD_EN);
13758 }
13759 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13760 tg3_flag_set(tp, PCI_EXPRESS);
13761 } else if (!tg3_flag(tp, 5705_PLUS) ||
13762 tg3_flag(tp, 5780_CLASS)) {
13763 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13764 if (!tp->pcix_cap) {
13765 dev_err(&tp->pdev->dev,
13766 "Cannot find PCI-X capability, aborting\n");
13767 return -EIO;
13768 }
13769
13770 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13771 tg3_flag_set(tp, PCIX_MODE);
13772 }
13773
13774 /* If we have an AMD 762 or VIA K8T800 chipset, write
13775 * reordering to the mailbox registers done by the host
13776 * controller can cause major troubles. We read back from
13777 * every mailbox register write to force the writes to be
13778 * posted to the chip in order.
13779 */
13780 if (pci_dev_present(tg3_write_reorder_chipsets) &&
13781 !tg3_flag(tp, PCI_EXPRESS))
13782 tg3_flag_set(tp, MBOX_WRITE_REORDER);
13783
13784 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13785 &tp->pci_cacheline_sz);
13786 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13787 &tp->pci_lat_timer);
13788 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13789 tp->pci_lat_timer < 64) {
13790 tp->pci_lat_timer = 64;
13791 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13792 tp->pci_lat_timer);
13793 }
13794
13795 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13796 /* 5700 BX chips need to have their TX producer index
13797 * mailboxes written twice to work around a bug.
13798 */
13799 tg3_flag_set(tp, TXD_MBOX_HWBUG);
13800
13801 /* If we are in PCI-X mode, enable register write workaround.
13802 *
13803 * The workaround is to use indirect register accesses
13804 * for all chip writes not to mailbox registers.
13805 */
13806 if (tg3_flag(tp, PCIX_MODE)) {
13807 u32 pm_reg;
13808
13809 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
13810
13811 /* The chip can have its power management PCI config
13812 * space registers clobbered due to this bug.
13813 * So explicitly force the chip into D0 here.
13814 */
13815 pci_read_config_dword(tp->pdev,
13816 tp->pm_cap + PCI_PM_CTRL,
13817 &pm_reg);
13818 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13819 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13820 pci_write_config_dword(tp->pdev,
13821 tp->pm_cap + PCI_PM_CTRL,
13822 pm_reg);
13823
13824 /* Also, force SERR#/PERR# in PCI command. */
13825 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13826 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13827 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13828 }
13829 }
13830
13831 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
13832 tg3_flag_set(tp, PCI_HIGH_SPEED);
13833 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
13834 tg3_flag_set(tp, PCI_32BIT);
13835
13836 /* Chip-specific fixup from Broadcom driver */
13837 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
13838 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
13839 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
13840 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
13841 }
13842
13843 /* Default fast path register access methods */
13844 tp->read32 = tg3_read32;
13845 tp->write32 = tg3_write32;
13846 tp->read32_mbox = tg3_read32;
13847 tp->write32_mbox = tg3_write32;
13848 tp->write32_tx_mbox = tg3_write32;
13849 tp->write32_rx_mbox = tg3_write32;
13850
13851 /* Various workaround register access methods */
13852 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
13853 tp->write32 = tg3_write_indirect_reg32;
13854 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13855 (tg3_flag(tp, PCI_EXPRESS) &&
13856 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
13857 /*
13858 * Back to back register writes can cause problems on these
13859 * chips, the workaround is to read back all reg writes
13860 * except those to mailbox regs.
13861 *
13862 * See tg3_write_indirect_reg32().
13863 */
13864 tp->write32 = tg3_write_flush_reg32;
13865 }
13866
13867 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
13868 tp->write32_tx_mbox = tg3_write32_tx_mbox;
13869 if (tg3_flag(tp, MBOX_WRITE_REORDER))
13870 tp->write32_rx_mbox = tg3_write_flush_reg32;
13871 }
13872
13873 if (tg3_flag(tp, ICH_WORKAROUND)) {
13874 tp->read32 = tg3_read_indirect_reg32;
13875 tp->write32 = tg3_write_indirect_reg32;
13876 tp->read32_mbox = tg3_read_indirect_mbox;
13877 tp->write32_mbox = tg3_write_indirect_mbox;
13878 tp->write32_tx_mbox = tg3_write_indirect_mbox;
13879 tp->write32_rx_mbox = tg3_write_indirect_mbox;
13880
13881 iounmap(tp->regs);
13882 tp->regs = NULL;
13883
13884 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13885 pci_cmd &= ~PCI_COMMAND_MEMORY;
13886 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13887 }
13888 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13889 tp->read32_mbox = tg3_read32_mbox_5906;
13890 tp->write32_mbox = tg3_write32_mbox_5906;
13891 tp->write32_tx_mbox = tg3_write32_mbox_5906;
13892 tp->write32_rx_mbox = tg3_write32_mbox_5906;
13893 }
13894
13895 if (tp->write32 == tg3_write_indirect_reg32 ||
13896 (tg3_flag(tp, PCIX_MODE) &&
13897 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13898 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
13899 tg3_flag_set(tp, SRAM_USE_CONFIG);
13900
13901 /* Get eeprom hw config before calling tg3_power_up().
13902 * In particular, the TG3_FLAG_IS_NIC flag must be
13903 * determined before calling tg3_power_up() so that
13904 * we know whether or not to switch out of Vaux power.
13905 * When the flag is set, it means that GPIO1 is used for eeprom
13906 * write protect and also implies that it is a LOM where GPIOs
13907 * are not used to switch power.
13908 */
13909 tg3_get_eeprom_hw_cfg(tp);
13910
13911 if (tg3_flag(tp, ENABLE_APE)) {
13912 /* Allow reads and writes to the
13913 * APE register and memory space.
13914 */
13915 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
13916 PCISTATE_ALLOW_APE_SHMEM_WR |
13917 PCISTATE_ALLOW_APE_PSPACE_WR;
13918 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
13919 pci_state_reg);
13920 }
13921
13922 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13923 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13924 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13925 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13926 tg3_flag(tp, 57765_PLUS))
13927 tg3_flag_set(tp, CPMU_PRESENT);
13928
13929 /* Set up tp->grc_local_ctrl before calling tg3_power_up().
13930 * GPIO1 driven high will bring 5700's external PHY out of reset.
13931 * It is also used as eeprom write protect on LOMs.
13932 */
13933 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
13934 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13935 tg3_flag(tp, EEPROM_WRITE_PROT))
13936 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
13937 GRC_LCLCTRL_GPIO_OUTPUT1);
13938 /* Unused GPIO3 must be driven as output on 5752 because there
13939 * are no pull-up resistors on unused GPIO pins.
13940 */
13941 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13942 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
13943
13944 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13945 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13946 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13947 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13948
13949 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
13950 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
13951 /* Turn off the debug UART. */
13952 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13953 if (tg3_flag(tp, IS_NIC))
13954 /* Keep VMain power. */
13955 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
13956 GRC_LCLCTRL_GPIO_OUTPUT0;
13957 }
13958
13959 /* Force the chip into D0. */
13960 err = tg3_power_up(tp);
13961 if (err) {
13962 dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
13963 return err;
13964 }
13965
13966 /* Derive initial jumbo mode from MTU assigned in
13967 * ether_setup() via the alloc_etherdev() call
13968 */
13969 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
13970 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13971
13972 /* Determine WakeOnLan speed to use. */
13973 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13974 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
13975 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
13976 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
13977 tg3_flag_clear(tp, WOL_SPEED_100MB);
13978 } else {
13979 tg3_flag_set(tp, WOL_SPEED_100MB);
13980 }
13981
13982 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13983 tp->phy_flags |= TG3_PHYFLG_IS_FET;
13984
13985 /* A few boards don't want Ethernet@WireSpeed phy feature */
13986 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13987 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
13988 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
13989 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
13990 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
13991 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13992 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
13993
13994 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
13995 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
13996 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
13997 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
13998 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
13999
14000 if (tg3_flag(tp, 5705_PLUS) &&
14001 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14002 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14003 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14004 !tg3_flag(tp, 57765_PLUS)) {
14005 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14006 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14007 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14008 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14009 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14010 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14011 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14012 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14013 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14014 } else
14015 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14016 }
14017
14018 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14019 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14020 tp->phy_otp = tg3_read_otp_phycfg(tp);
14021 if (tp->phy_otp == 0)
14022 tp->phy_otp = TG3_OTP_DEFAULT;
14023 }
14024
14025 if (tg3_flag(tp, CPMU_PRESENT))
14026 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14027 else
14028 tp->mi_mode = MAC_MI_MODE_BASE;
14029
14030 tp->coalesce_mode = 0;
14031 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14032 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14033 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14034
14035 /* Set these bits to enable statistics workaround. */
14036 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14037 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14038 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14039 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14040 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14041 }
14042
14043 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14044 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14045 tg3_flag_set(tp, USE_PHYLIB);
14046
14047 err = tg3_mdio_init(tp);
14048 if (err)
14049 return err;
14050
14051 /* Initialize data/descriptor byte/word swapping. */
14052 val = tr32(GRC_MODE);
14053 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14054 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14055 GRC_MODE_WORD_SWAP_B2HRX_DATA |
14056 GRC_MODE_B2HRX_ENABLE |
14057 GRC_MODE_HTX2B_ENABLE |
14058 GRC_MODE_HOST_STACKUP);
14059 else
14060 val &= GRC_MODE_HOST_STACKUP;
14061
14062 tw32(GRC_MODE, val | tp->grc_mode);
14063
14064 tg3_switch_clocks(tp);
14065
14066 /* Clear this out for sanity. */
14067 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14068
14069 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14070 &pci_state_reg);
14071 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14072 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14073 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14074
14075 if (chiprevid == CHIPREV_ID_5701_A0 ||
14076 chiprevid == CHIPREV_ID_5701_B0 ||
14077 chiprevid == CHIPREV_ID_5701_B2 ||
14078 chiprevid == CHIPREV_ID_5701_B5) {
14079 void __iomem *sram_base;
14080
14081 /* Write some dummy words into the SRAM status block
14082 * area and see if it reads back correctly. If the readback
14083 * is bad, force-enable the PCIX workaround.
14084 */
14085 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14086
14087 writel(0x00000000, sram_base);
14088 writel(0x00000000, sram_base + 4);
14089 writel(0xffffffff, sram_base + 4);
14090 if (readl(sram_base) != 0x00000000)
14091 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14092 }
14093 }
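/* Note: the write sequence above is a deliberate torture test: after 0
 * is written to sram_base and 0xffffffff to the adjacent word, a correct
 * target interface still reads 0 back from sram_base; any other value
 * means the adjacent write corrupted it, so the PCIX target workaround
 * is force-enabled.
 */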
14094
14095 udelay(50);
14096 tg3_nvram_init(tp);
14097
14098 grc_misc_cfg = tr32(GRC_MISC_CFG);
14099 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14100
14101 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14102 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14103 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14104 tg3_flag_set(tp, IS_5788);
14105
14106 if (!tg3_flag(tp, IS_5788) &&
14107 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14108 tg3_flag_set(tp, TAGGED_STATUS);
14109 if (tg3_flag(tp, TAGGED_STATUS)) {
14110 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14111 HOSTCC_MODE_CLRTICK_TXBD);
14112
14113 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14114 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14115 tp->misc_host_ctrl);
14116 }
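/* Note: with tagged status, the status block carries a tag which the
 * driver echoes back through the interrupt mailbox when re-enabling
 * interrupts, so the chip can tell whether the host has seen the latest
 * update; the CLRTICK coalescing bits are enabled alongside it above.
 */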
14117
14118 /* Preserve the APE MAC_MODE bits */
14119 if (tg3_flag(tp, ENABLE_APE))
14120 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14121 else
14122 tp->mac_mode = TG3_DEF_MAC_MODE;
14123
14124 /* these are limited to 10/100 only */
14125 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14126 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14127 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14128 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14129 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14130 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14131 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14132 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14133 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14134 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14135 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14136 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14137 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14138 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14139 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14140 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14141
14142 err = tg3_phy_probe(tp);
14143 if (err) {
14144 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14145 /* ... but do not return immediately ... */
14146 tg3_mdio_fini(tp);
14147 }
14148
14149 tg3_read_vpd(tp);
14150 tg3_read_fw_ver(tp);
14151
14152 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14153 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14154 } else {
14155 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14156 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14157 else
14158 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14159 }
14160
14161 /* 5700 {AX,BX} chips have a broken status block link
14162 * change bit implementation, so we must use the
14163 * status register in those cases.
14164 */
14165 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14166 tg3_flag_set(tp, USE_LINKCHG_REG);
14167 else
14168 tg3_flag_clear(tp, USE_LINKCHG_REG);
14169
14170 /* The led_ctrl is set during tg3_phy_probe; here we might
14171 * have to force the link status polling mechanism based
14172 * upon subsystem IDs.
14173 */
14174 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14175 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14176 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14177 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14178 tg3_flag_set(tp, USE_LINKCHG_REG);
14179 }
14180
14181 /* For all SERDES we poll the MAC status register. */
14182 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14183 tg3_flag_set(tp, POLL_SERDES);
14184 else
14185 tg3_flag_clear(tp, POLL_SERDES);
14186
14187 tp->rx_offset = NET_IP_ALIGN;
14188 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14189 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14190 tg3_flag(tp, PCIX_MODE)) {
14191 tp->rx_offset = 0;
14192 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14193 tp->rx_copy_thresh = ~(u16)0;
14194 #endif
14195 }
14196
14197 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14198 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14199 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14200
14201 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14202
14203 /* Increment the rx prod index on the rx std ring by at most
14204 * 8 for these chips to work around hw errata.
14205 */
14206 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14207 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14208 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14209 tp->rx_std_max_post = 8;
14210
14211 if (tg3_flag(tp, ASPM_WORKAROUND))
14212 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14213 PCIE_PWR_MGMT_L1_THRESH_MSK;
14214
14215 return err;
14216 }
14217
14218 #ifdef CONFIG_SPARC
14219 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14220 {
14221 struct net_device *dev = tp->dev;
14222 struct pci_dev *pdev = tp->pdev;
14223 struct device_node *dp = pci_device_to_OF_node(pdev);
14224 const unsigned char *addr;
14225 int len;
14226
14227 addr = of_get_property(dp, "local-mac-address", &len);
14228 if (addr && len == 6) {
14229 memcpy(dev->dev_addr, addr, 6);
14230 memcpy(dev->perm_addr, dev->dev_addr, 6);
14231 return 0;
14232 }
14233 return -ENODEV;
14234 }
14235
14236 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14237 {
14238 struct net_device *dev = tp->dev;
14239
14240 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14241 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14242 return 0;
14243 }
14244 #endif
14245
14246 static int __devinit tg3_get_device_address(struct tg3 *tp)
14247 {
14248 struct net_device *dev = tp->dev;
14249 u32 hi, lo, mac_offset;
14250 int addr_ok = 0;
14251
14252 #ifdef CONFIG_SPARC
14253 if (!tg3_get_macaddr_sparc(tp))
14254 return 0;
14255 #endif
14256
14257 mac_offset = 0x7c;
14258 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14259 tg3_flag(tp, 5780_CLASS)) {
14260 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14261 mac_offset = 0xcc;
14262 if (tg3_nvram_lock(tp))
14263 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14264 else
14265 tg3_nvram_unlock(tp);
14266 } else if (tg3_flag(tp, 5717_PLUS)) {
14267 if (PCI_FUNC(tp->pdev->devfn) & 1)
14268 mac_offset = 0xcc;
14269 if (PCI_FUNC(tp->pdev->devfn) > 1)
14270 mac_offset += 0x18c;
14271 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14272 mac_offset = 0x10;
14273
14274 /* First try to get it from MAC address mailbox. */
14275 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14276 if ((hi >> 16) == 0x484b) {
14277 dev->dev_addr[0] = (hi >> 8) & 0xff;
14278 dev->dev_addr[1] = (hi >> 0) & 0xff;
14279
14280 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14281 dev->dev_addr[2] = (lo >> 24) & 0xff;
14282 dev->dev_addr[3] = (lo >> 16) & 0xff;
14283 dev->dev_addr[4] = (lo >> 8) & 0xff;
14284 dev->dev_addr[5] = (lo >> 0) & 0xff;
14285
14286 /* Some old bootcode may report a 0 MAC address in SRAM */
14287 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14288 }
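/* Note: 0x484b is ASCII "HK", the signature bootcode leaves in the high
 * MAC-address mailbox word; the low 16 bits of 'hi' and all four bytes
 * of 'lo' then supply the six-byte station address.
 */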
14289 if (!addr_ok) {
14290 /* Next, try NVRAM. */
14291 if (!tg3_flag(tp, NO_NVRAM) &&
14292 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14293 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14294 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14295 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14296 }
14297 /* Finally just fetch it out of the MAC control regs. */
14298 else {
14299 hi = tr32(MAC_ADDR_0_HIGH);
14300 lo = tr32(MAC_ADDR_0_LOW);
14301
14302 dev->dev_addr[5] = lo & 0xff;
14303 dev->dev_addr[4] = (lo >> 8) & 0xff;
14304 dev->dev_addr[3] = (lo >> 16) & 0xff;
14305 dev->dev_addr[2] = (lo >> 24) & 0xff;
14306 dev->dev_addr[1] = hi & 0xff;
14307 dev->dev_addr[0] = (hi >> 8) & 0xff;
14308 }
14309 }
14310
14311 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14312 #ifdef CONFIG_SPARC
14313 if (!tg3_get_default_macaddr_sparc(tp))
14314 return 0;
14315 #endif
14316 return -EINVAL;
14317 }
14318 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14319 return 0;
14320 }
14321
14322 #define BOUNDARY_SINGLE_CACHELINE 1
14323 #define BOUNDARY_MULTI_CACHELINE 2
14324
14325 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14326 {
14327 int cacheline_size;
14328 u8 byte;
14329 int goal;
14330
14331 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14332 if (byte == 0)
14333 cacheline_size = 1024;
14334 else
14335 cacheline_size = (int) byte * 4;
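	/* Note: PCI_CACHE_LINE_SIZE is expressed in 32-bit words, hence
	 * the multiply by 4; e.g. a register value of 16 means a 64-byte
	 * cache line. A value of 0 (BIOS never programmed it) is treated
	 * as the 1024-byte worst case.
	 */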
14336
14337 /* On 5703 and later chips, the boundary bits have no
14338 * effect.
14339 */
14340 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14341 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14342 !tg3_flag(tp, PCI_EXPRESS))
14343 goto out;
14344
14345 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14346 goal = BOUNDARY_MULTI_CACHELINE;
14347 #else
14348 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14349 goal = BOUNDARY_SINGLE_CACHELINE;
14350 #else
14351 goal = 0;
14352 #endif
14353 #endif
14354
14355 if (tg3_flag(tp, 57765_PLUS)) {
14356 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14357 goto out;
14358 }
14359
14360 if (!goal)
14361 goto out;
14362
14363 /* PCI controllers on most RISC systems tend to disconnect
14364 * when a device tries to burst across a cache-line boundary.
14365 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14366 *
14367 * Unfortunately, for PCI-E there are only limited
14368 * write-side controls for this, and thus for reads
14369 * we will still get the disconnects. We'll also waste
14370 * these PCI cycles for both read and write for chips
14371 * other than 5700 and 5701 which do not implement the
14372 * boundary bits.
14373 */
14374 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14375 switch (cacheline_size) {
14376 case 16:
14377 case 32:
14378 case 64:
14379 case 128:
14380 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14381 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14382 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14383 } else {
14384 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14385 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14386 }
14387 break;
14388
14389 case 256:
14390 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14391 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14392 break;
14393
14394 default:
14395 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14396 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14397 break;
14398 }
14399 } else if (tg3_flag(tp, PCI_EXPRESS)) {
14400 switch (cacheline_size) {
14401 case 16:
14402 case 32:
14403 case 64:
14404 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14405 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14406 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14407 break;
14408 }
14409 /* fallthrough */
14410 case 128:
14411 default:
14412 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14413 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14414 break;
14415 }
14416 } else {
14417 switch (cacheline_size) {
14418 case 16:
14419 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14420 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14421 DMA_RWCTRL_WRITE_BNDRY_16);
14422 break;
14423 }
14424 /* fallthrough */
14425 case 32:
14426 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14427 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14428 DMA_RWCTRL_WRITE_BNDRY_32);
14429 break;
14430 }
14431 /* fallthrough */
14432 case 64:
14433 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14434 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14435 DMA_RWCTRL_WRITE_BNDRY_64);
14436 break;
14437 }
14438 /* fallthrough */
14439 case 128:
14440 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14441 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14442 DMA_RWCTRL_WRITE_BNDRY_128);
14443 break;
14444 }
14445 /* fallthrough */
14446 case 256:
14447 val |= (DMA_RWCTRL_READ_BNDRY_256 |
14448 DMA_RWCTRL_WRITE_BNDRY_256);
14449 break;
14450 case 512:
14451 val |= (DMA_RWCTRL_READ_BNDRY_512 |
14452 DMA_RWCTRL_WRITE_BNDRY_512);
14453 break;
14454 case 1024:
14455 default:
14456 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14457 DMA_RWCTRL_WRITE_BNDRY_1024);
14458 break;
14459 }
14460 }
14461
14462 out:
14463 return val;
14464 }
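/* Illustrative sketch (hypothetical usage, assuming a fully probed 'tp'):
 * how a caller combines the PCI command codes with the computed boundary
 * bits. This mirrors tg3_test_dma() below.
 */
#if 0
	u32 val = (0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
		  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT);

	val = tg3_calc_dma_bndry(tp, val);	/* only boundary bits change */
	tw32(TG3PCI_DMA_RW_CTRL, val);
#endif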
14465
14466 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14467 {
14468 struct tg3_internal_buffer_desc test_desc;
14469 u32 sram_dma_descs;
14470 int i, ret;
14471
14472 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14473
14474 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14475 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14476 tw32(RDMAC_STATUS, 0);
14477 tw32(WDMAC_STATUS, 0);
14478
14479 tw32(BUFMGR_MODE, 0);
14480 tw32(FTQ_RESET, 0);
14481
14482 test_desc.addr_hi = ((u64) buf_dma) >> 32;
14483 test_desc.addr_lo = buf_dma & 0xffffffff;
14484 test_desc.nic_mbuf = 0x00002100;
14485 test_desc.len = size;
14486
14487 /*
14488 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
14489 * the *second* time the tg3 driver was loaded after an
14490 * initial scan.
14491 *
14492 * Broadcom tells me:
14493 * ...the DMA engine is connected to the GRC block and a DMA
14494 * reset may affect the GRC block in some unpredictable way...
14495 * The behavior of resets to individual blocks has not been tested.
14496 *
14497 * Broadcom noted the GRC reset will also reset all sub-components.
14498 */
14499 if (to_device) {
14500 test_desc.cqid_sqid = (13 << 8) | 2;
14501
14502 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14503 udelay(40);
14504 } else {
14505 test_desc.cqid_sqid = (16 << 8) | 7;
14506
14507 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14508 udelay(40);
14509 }
14510 test_desc.flags = 0x00000005;
14511
14512 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14513 u32 val;
14514
14515 val = *(((u32 *)&test_desc) + i);
14516 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14517 sram_dma_descs + (i * sizeof(u32)));
14518 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14519 }
14520 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
14521
14522 if (to_device)
14523 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14524 else
14525 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
14526
14527 ret = -ENODEV;
14528 for (i = 0; i < 40; i++) {
14529 u32 val;
14530
14531 if (to_device)
14532 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14533 else
14534 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14535 if ((val & 0xffff) == sram_dma_descs) {
14536 ret = 0;
14537 break;
14538 }
14539
14540 udelay(100);
14541 }
14542
14543 return ret;
14544 }
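/* Note: tg3_do_test_dma() performs one direction per call. The caller
 * (tg3_test_dma() below) first pushes the buffer into NIC SRAM at
 * 0x2100 (to_device == 1), then pulls it back out (to_device == 0) and
 * compares the contents word by word.
 */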
14545
14546 #define TEST_BUFFER_SIZE 0x2000
14547
14548 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14549 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14550 { },
14551 };
14552
14553 static int __devinit tg3_test_dma(struct tg3 *tp)
14554 {
14555 dma_addr_t buf_dma;
14556 u32 *buf, saved_dma_rwctrl;
14557 int ret = 0;
14558
14559 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14560 &buf_dma, GFP_KERNEL);
14561 if (!buf) {
14562 ret = -ENOMEM;
14563 goto out_nofree;
14564 }
14565
14566 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14567 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14568
14569 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14570
14571 if (tg3_flag(tp, 57765_PLUS))
14572 goto out;
14573
14574 if (tg3_flag(tp, PCI_EXPRESS)) {
14575 /* DMA read watermark not used on PCIE */
14576 tp->dma_rwctrl |= 0x00180000;
14577 } else if (!tg3_flag(tp, PCIX_MODE)) {
14578 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14579 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14580 tp->dma_rwctrl |= 0x003f0000;
14581 else
14582 tp->dma_rwctrl |= 0x003f000f;
14583 } else {
14584 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14585 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14586 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14587 u32 read_water = 0x7;
14588
14589 /* If the 5704 is behind the EPB bridge, we can
14590 * do the less restrictive ONE_DMA workaround for
14591 * better performance.
14592 */
14593 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
14594 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14595 tp->dma_rwctrl |= 0x8000;
14596 else if (ccval == 0x6 || ccval == 0x7)
14597 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14598
14599 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14600 read_water = 4;
14601 /* Set bit 23 to enable PCIX hw bug fix */
14602 tp->dma_rwctrl |=
14603 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14604 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14605 (1 << 23);
14606 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14607 /* 5780 always in PCIX mode */
14608 tp->dma_rwctrl |= 0x00144000;
14609 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14610 /* 5714 always in PCIX mode */
14611 tp->dma_rwctrl |= 0x00148000;
14612 } else {
14613 tp->dma_rwctrl |= 0x001b000f;
14614 }
14615 }
14616
14617 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14618 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14619 tp->dma_rwctrl &= 0xfffffff0;
14620
14621 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14622 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14623 /* Remove this if it causes problems for some boards. */
14624 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14625
14626 /* On 5700/5701 chips, we need to set this bit.
14627 * Otherwise the chip will issue cacheline transactions
14628 * to streamable DMA memory without all of the byte
14629 * enables turned on. This is an error on several
14630 * RISC PCI controllers, in particular sparc64.
14631 *
14632 * On 5703/5704 chips, this bit has been reassigned
14633 * a different meaning. In particular, it is used
14634 * on those chips to enable a PCI-X workaround.
14635 */
14636 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14637 }
14638
14639 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14640
14641 #if 0
14642 /* Unneeded, already done by tg3_get_invariants. */
14643 tg3_switch_clocks(tp);
14644 #endif
14645
14646 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14647 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14648 goto out;
14649
14650 /* It is best to perform the DMA test with the maximum write burst size
14651 * to expose the 5700/5701 write DMA bug.
14652 */
14653 saved_dma_rwctrl = tp->dma_rwctrl;
14654 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14655 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14656
14657 while (1) {
14658 u32 *p = buf, i;
14659
14660 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
14661 p[i] = i;
14662
14663 /* Send the buffer to the chip. */
14664 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14665 if (ret) {
14666 dev_err(&tp->pdev->dev,
14667 "%s: Buffer write failed. err = %d\n",
14668 __func__, ret);
14669 break;
14670 }
14671
14672 #if 0
14673 /* validate data reached card RAM correctly. */
14674 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14675 u32 val;
14676 tg3_read_mem(tp, 0x2100 + (i*4), &val);
14677 if (le32_to_cpu(val) != p[i]) {
14678 dev_err(&tp->pdev->dev,
14679 "%s: Buffer corrupted on device! "
14680 "(%d != %d)\n", __func__, val, i);
14681 /* ret = -ENODEV here? */
14682 }
14683 p[i] = 0;
14684 }
14685 #endif
14686 /* Now read it back. */
14687 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14688 if (ret) {
14689 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
14690 "err = %d\n", __func__, ret);
14691 break;
14692 }
14693
14694 /* Verify it. */
14695 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14696 if (p[i] == i)
14697 continue;
14698
14699 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14700 DMA_RWCTRL_WRITE_BNDRY_16) {
14701 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14702 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14703 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14704 break;
14705 } else {
14706 dev_err(&tp->pdev->dev,
14707 "%s: Buffer corrupted on read back! "
14708 "(%d != %d)\n", __func__, p[i], i);
14709 ret = -ENODEV;
14710 goto out;
14711 }
14712 }
14713
14714 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
14715 /* Success. */
14716 ret = 0;
14717 break;
14718 }
14719 }
14720 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14721 DMA_RWCTRL_WRITE_BNDRY_16) {
14722 /* The DMA test passed without adjusting the DMA boundary;
14723 * now look for chipsets that are known to expose the
14724 * DMA bug without failing the test.
14725 */
14726 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
14727 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14728 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14729 } else {
14730 /* Safe to use the calculated DMA boundary. */
14731 tp->dma_rwctrl = saved_dma_rwctrl;
14732 }
14733
14734 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14735 }
14736
14737 out:
14738 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
14739 out_nofree:
14740 return ret;
14741 }
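/* Note: the retry loop above intentionally starts with the widest write
 * burst (boundary bits cleared) to provoke the 5700/5701 write DMA bug;
 * on the first corrupted word it falls back to the conservative 16-byte
 * write boundary and re-runs the test, so the function settles on a
 * working dma_rwctrl value before returning.
 */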
14742
14743 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14744 {
14745 if (tg3_flag(tp, 57765_PLUS)) {
14746 tp->bufmgr_config.mbuf_read_dma_low_water =
14747 DEFAULT_MB_RDMA_LOW_WATER_5705;
14748 tp->bufmgr_config.mbuf_mac_rx_low_water =
14749 DEFAULT_MB_MACRX_LOW_WATER_57765;
14750 tp->bufmgr_config.mbuf_high_water =
14751 DEFAULT_MB_HIGH_WATER_57765;
14752
14753 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14754 DEFAULT_MB_RDMA_LOW_WATER_5705;
14755 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14756 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14757 tp->bufmgr_config.mbuf_high_water_jumbo =
14758 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14759 } else if (tg3_flag(tp, 5705_PLUS)) {
14760 tp->bufmgr_config.mbuf_read_dma_low_water =
14761 DEFAULT_MB_RDMA_LOW_WATER_5705;
14762 tp->bufmgr_config.mbuf_mac_rx_low_water =
14763 DEFAULT_MB_MACRX_LOW_WATER_5705;
14764 tp->bufmgr_config.mbuf_high_water =
14765 DEFAULT_MB_HIGH_WATER_5705;
14766 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14767 tp->bufmgr_config.mbuf_mac_rx_low_water =
14768 DEFAULT_MB_MACRX_LOW_WATER_5906;
14769 tp->bufmgr_config.mbuf_high_water =
14770 DEFAULT_MB_HIGH_WATER_5906;
14771 }
14772
14773 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14774 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
14775 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14776 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
14777 tp->bufmgr_config.mbuf_high_water_jumbo =
14778 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
14779 } else {
14780 tp->bufmgr_config.mbuf_read_dma_low_water =
14781 DEFAULT_MB_RDMA_LOW_WATER;
14782 tp->bufmgr_config.mbuf_mac_rx_low_water =
14783 DEFAULT_MB_MACRX_LOW_WATER;
14784 tp->bufmgr_config.mbuf_high_water =
14785 DEFAULT_MB_HIGH_WATER;
14786
14787 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14788 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
14789 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14790 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
14791 tp->bufmgr_config.mbuf_high_water_jumbo =
14792 DEFAULT_MB_HIGH_WATER_JUMBO;
14793 }
14794
14795 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
14796 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
14797 }
14798
14799 static char * __devinit tg3_phy_string(struct tg3 *tp)
14800 {
14801 switch (tp->phy_id & TG3_PHY_ID_MASK) {
14802 case TG3_PHY_ID_BCM5400: return "5400";
14803 case TG3_PHY_ID_BCM5401: return "5401";
14804 case TG3_PHY_ID_BCM5411: return "5411";
14805 case TG3_PHY_ID_BCM5701: return "5701";
14806 case TG3_PHY_ID_BCM5703: return "5703";
14807 case TG3_PHY_ID_BCM5704: return "5704";
14808 case TG3_PHY_ID_BCM5705: return "5705";
14809 case TG3_PHY_ID_BCM5750: return "5750";
14810 case TG3_PHY_ID_BCM5752: return "5752";
14811 case TG3_PHY_ID_BCM5714: return "5714";
14812 case TG3_PHY_ID_BCM5780: return "5780";
14813 case TG3_PHY_ID_BCM5755: return "5755";
14814 case TG3_PHY_ID_BCM5787: return "5787";
14815 case TG3_PHY_ID_BCM5784: return "5784";
14816 case TG3_PHY_ID_BCM5756: return "5722/5756";
14817 case TG3_PHY_ID_BCM5906: return "5906";
14818 case TG3_PHY_ID_BCM5761: return "5761";
14819 case TG3_PHY_ID_BCM5718C: return "5718C";
14820 case TG3_PHY_ID_BCM5718S: return "5718S";
14821 case TG3_PHY_ID_BCM57765: return "57765";
14822 case TG3_PHY_ID_BCM5719C: return "5719C";
14823 case TG3_PHY_ID_BCM5720C: return "5720C";
14824 case TG3_PHY_ID_BCM8002: return "8002/serdes";
14825 case 0: return "serdes";
14826 default: return "unknown";
14827 }
14828 }
14829
14830 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
14831 {
14832 if (tg3_flag(tp, PCI_EXPRESS)) {
14833 strcpy(str, "PCI Express");
14834 return str;
14835 } else if (tg3_flag(tp, PCIX_MODE)) {
14836 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
14837
14838 strcpy(str, "PCIX:");
14839
14840 if ((clock_ctrl == 7) ||
14841 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
14842 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
14843 strcat(str, "133MHz");
14844 else if (clock_ctrl == 0)
14845 strcat(str, "33MHz");
14846 else if (clock_ctrl == 2)
14847 strcat(str, "50MHz");
14848 else if (clock_ctrl == 4)
14849 strcat(str, "66MHz");
14850 else if (clock_ctrl == 6)
14851 strcat(str, "100MHz");
14852 } else {
14853 strcpy(str, "PCI:");
14854 if (tg3_flag(tp, PCI_HIGH_SPEED))
14855 strcat(str, "66MHz");
14856 else
14857 strcat(str, "33MHz");
14858 }
14859 if (tg3_flag(tp, PCI_32BIT))
14860 strcat(str, ":32-bit");
14861 else
14862 strcat(str, ":64-bit");
14863 return str;
14864 }
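/* Example results (illustrative): "PCI Express" (early return, no bus
 * width suffix), "PCIX:133MHz:64-bit", or "PCI:33MHz:32-bit". The caller
 * supplies the buffer; tg3_init_one() below uses char str[40], which is
 * large enough for the longest string produced here.
 */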
14865
14866 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14867 {
14868 struct pci_dev *peer;
14869 unsigned int func, devnr = tp->pdev->devfn & ~7;
14870
14871 for (func = 0; func < 8; func++) {
14872 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14873 if (peer && peer != tp->pdev)
14874 break;
14875 pci_dev_put(peer);
14876 }
14877 /* 5704 can be configured in single-port mode; set peer to
14878 * tp->pdev in that case.
14879 */
14880 if (!peer) {
14881 peer = tp->pdev;
14882 return peer;
14883 }
14884
14885 /*
14886 * We don't need to keep the refcount elevated; there's no way
14887 * to remove one half of this device without removing the other.
14888 */
14889 pci_dev_put(peer);
14890
14891 return peer;
14892 }
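/* Note: devfn & ~7 masks off the PCI function number (the low three
 * bits), so devnr | func scans all eight functions of the slot; the
 * peer is the other port of a dual-port device such as the 5704.
 */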
14893
14894 static void __devinit tg3_init_coal(struct tg3 *tp)
14895 {
14896 struct ethtool_coalesce *ec = &tp->coal;
14897
14898 memset(ec, 0, sizeof(*ec));
14899 ec->cmd = ETHTOOL_GCOALESCE;
14900 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
14901 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
14902 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
14903 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
14904 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
14905 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
14906 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
14907 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
14908 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
14909
14910 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
14911 HOSTCC_MODE_CLRTICK_TXBD)) {
14912 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
14913 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
14914 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
14915 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
14916 }
14917
14918 if (tg3_flag(tp, 5705_PLUS)) {
14919 ec->rx_coalesce_usecs_irq = 0;
14920 ec->tx_coalesce_usecs_irq = 0;
14921 ec->stats_block_coalesce_usecs = 0;
14922 }
14923 }
14924
14925 static const struct net_device_ops tg3_netdev_ops = {
14926 .ndo_open = tg3_open,
14927 .ndo_stop = tg3_close,
14928 .ndo_start_xmit = tg3_start_xmit,
14929 .ndo_get_stats64 = tg3_get_stats64,
14930 .ndo_validate_addr = eth_validate_addr,
14931 .ndo_set_multicast_list = tg3_set_rx_mode,
14932 .ndo_set_mac_address = tg3_set_mac_addr,
14933 .ndo_do_ioctl = tg3_ioctl,
14934 .ndo_tx_timeout = tg3_tx_timeout,
14935 .ndo_change_mtu = tg3_change_mtu,
14936 .ndo_fix_features = tg3_fix_features,
14937 .ndo_set_features = tg3_set_features,
14938 #ifdef CONFIG_NET_POLL_CONTROLLER
14939 .ndo_poll_controller = tg3_poll_controller,
14940 #endif
14941 };
14942
14943 static int __devinit tg3_init_one(struct pci_dev *pdev,
14944 const struct pci_device_id *ent)
14945 {
14946 struct net_device *dev;
14947 struct tg3 *tp;
14948 int i, err, pm_cap;
14949 u32 sndmbx, rcvmbx, intmbx;
14950 char str[40];
14951 u64 dma_mask, persist_dma_mask;
14952 u32 features = 0;
14953
14954 printk_once(KERN_INFO "%s\n", version);
14955
14956 err = pci_enable_device(pdev);
14957 if (err) {
14958 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
14959 return err;
14960 }
14961
14962 err = pci_request_regions(pdev, DRV_MODULE_NAME);
14963 if (err) {
14964 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
14965 goto err_out_disable_pdev;
14966 }
14967
14968 pci_set_master(pdev);
14969
14970 /* Find power-management capability. */
14971 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
14972 if (pm_cap == 0) {
14973 dev_err(&pdev->dev,
14974 "Cannot find Power Management capability, aborting\n");
14975 err = -EIO;
14976 goto err_out_free_res;
14977 }
14978
14979 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
14980 if (!dev) {
14981 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
14982 err = -ENOMEM;
14983 goto err_out_free_res;
14984 }
14985
14986 SET_NETDEV_DEV(dev, &pdev->dev);
14987
14988 tp = netdev_priv(dev);
14989 tp->pdev = pdev;
14990 tp->dev = dev;
14991 tp->pm_cap = pm_cap;
14992 tp->rx_mode = TG3_DEF_RX_MODE;
14993 tp->tx_mode = TG3_DEF_TX_MODE;
14994
14995 if (tg3_debug > 0)
14996 tp->msg_enable = tg3_debug;
14997 else
14998 tp->msg_enable = TG3_DEF_MSG_ENABLE;
14999
15000 /* The word/byte swap controls here control register access byte
15001 * swapping. DMA data byte swapping is controlled in the GRC_MODE
15002 * setting below.
15003 */
15004 tp->misc_host_ctrl =
15005 MISC_HOST_CTRL_MASK_PCI_INT |
15006 MISC_HOST_CTRL_WORD_SWAP |
15007 MISC_HOST_CTRL_INDIR_ACCESS |
15008 MISC_HOST_CTRL_PCISTATE_RW;
15009
15010 /* The NONFRM (non-frame) byte/word swap controls take effect
15011 * on descriptor entries, i.e. anything that isn't packet data.
15012 *
15013 * The StrongARM chips on the board (one for tx, one for rx)
15014 * are running in big-endian mode.
15015 */
15016 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15017 GRC_MODE_WSWAP_NONFRM_DATA);
15018 #ifdef __BIG_ENDIAN
15019 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15020 #endif
15021 spin_lock_init(&tp->lock);
15022 spin_lock_init(&tp->indirect_lock);
15023 INIT_WORK(&tp->reset_task, tg3_reset_task);
15024
15025 tp->regs = pci_ioremap_bar(pdev, BAR_0);
15026 if (!tp->regs) {
15027 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15028 err = -ENOMEM;
15029 goto err_out_free_dev;
15030 }
15031
15032 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15033 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15034
15035 dev->ethtool_ops = &tg3_ethtool_ops;
15036 dev->watchdog_timeo = TG3_TX_TIMEOUT;
15037 dev->netdev_ops = &tg3_netdev_ops;
15038 dev->irq = pdev->irq;
15039
15040 err = tg3_get_invariants(tp);
15041 if (err) {
15042 dev_err(&pdev->dev,
15043 "Problem fetching invariants of chip, aborting\n");
15044 goto err_out_iounmap;
15045 }
15046
15047 /* The EPB bridge inside 5714, 5715, and 5780 and any
15048 * device behind the EPB cannot support DMA addresses > 40-bit.
15049 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15050 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15051 * do DMA address check in tg3_start_xmit().
15052 */
15053 if (tg3_flag(tp, IS_5788))
15054 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15055 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15056 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15057 #ifdef CONFIG_HIGHMEM
15058 dma_mask = DMA_BIT_MASK(64);
15059 #endif
15060 } else
15061 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
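	/* Note: DMA_BIT_MASK(40) == 0x000000ffffffffff. With CONFIG_HIGHMEM
	 * the streaming mask is still widened to 64 bits and, per the
	 * comment above, tg3_start_xmit() bounces any buffer that the
	 * 40-bit-limited parts cannot reach.
	 */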
15062
15063 /* Configure DMA attributes. */
15064 if (dma_mask > DMA_BIT_MASK(32)) {
15065 err = pci_set_dma_mask(pdev, dma_mask);
15066 if (!err) {
15067 features |= NETIF_F_HIGHDMA;
15068 err = pci_set_consistent_dma_mask(pdev,
15069 persist_dma_mask);
15070 if (err < 0) {
15071 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15072 "DMA for consistent allocations\n");
15073 goto err_out_iounmap;
15074 }
15075 }
15076 }
15077 if (err || dma_mask == DMA_BIT_MASK(32)) {
15078 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15079 if (err) {
15080 dev_err(&pdev->dev,
15081 "No usable DMA configuration, aborting\n");
15082 goto err_out_iounmap;
15083 }
15084 }
15085
15086 tg3_init_bufmgr_config(tp);
15087
15088 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15089
15090 /* 5700 B0 chips do not support checksumming correctly due
15091 * to hardware bugs.
15092 */
15093 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15094 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15095
15096 if (tg3_flag(tp, 5755_PLUS))
15097 features |= NETIF_F_IPV6_CSUM;
15098 }
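/*
 * These feature flags are visible to userspace once registered; as a
 * hypothetical example, "ethtool -K eth0 sg on" would toggle
 * scatter-gather at runtime (interface name assumed).
 */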
15099
15100 /* TSO is on by default on chips that support hardware TSO.
15101 * Firmware TSO on older chips gives lower performance, so it
15102 * is off by default, but can be enabled using ethtool.
15103 */
15104 if ((tg3_flag(tp, HW_TSO_1) ||
15105 tg3_flag(tp, HW_TSO_2) ||
15106 tg3_flag(tp, HW_TSO_3)) &&
15107 (features & NETIF_F_IP_CSUM))
15108 features |= NETIF_F_TSO;
15109 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15110 if (features & NETIF_F_IPV6_CSUM)
15111 features |= NETIF_F_TSO6;
15112 if (tg3_flag(tp, HW_TSO_3) ||
15113 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15114 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15115 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15116 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15117 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15118 features |= NETIF_F_TSO_ECN;
15119 }
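/*
 * Per the comment above, firmware TSO on older chips stays off by
 * default; a user could opt in with e.g. "ethtool -K eth0 tso on"
 * (interface name assumed).
 */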
15120
15121 dev->features |= features;
15122 dev->vlan_features |= features;
15123
15124 /*
15125 * Add loopback capability only for a subset of devices that support
15126 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15127 * loopback for the remaining devices.
15128 */
15129 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15130 !tg3_flag(tp, CPMU_PRESENT))
15131 /* Add the loopback capability */
15132 features |= NETIF_F_LOOPBACK;
15133
15134 dev->hw_features |= features;
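/*
 * hw_features advertises what ethtool may toggle. NETIF_F_LOOPBACK is
 * OR'd into features only after dev->features was set above, so
 * loopback starts disabled but remains switchable at runtime.
 */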
15135
15136 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15137 !tg3_flag(tp, TSO_CAPABLE) &&
15138 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15139 tg3_flag_set(tp, MAX_RXPEND_64);
15140 tp->rx_pending = 63;
15141 }
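/*
 * A 5705 A1 without TSO on a bus that is not running at high speed is
 * limited to a 64-entry rx ring (MAX_RXPEND_64), hence the
 * 63-descriptor cap above.
 */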
15142
15143 err = tg3_get_device_address(tp);
15144 if (err) {
15145 dev_err(&pdev->dev,
15146 "Could not obtain valid ethernet address, aborting\n");
15147 goto err_out_iounmap;
15148 }
15149
15150 if (tg3_flag(tp, ENABLE_APE)) {
15151 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15152 if (!tp->aperegs) {
15153 dev_err(&pdev->dev,
15154 "Cannot map APE registers, aborting\n");
15155 err = -ENOMEM;
15156 goto err_out_iounmap;
15157 }
15158
15159 tg3_ape_lock_init(tp);
15160
15161 if (tg3_flag(tp, ENABLE_ASF))
15162 tg3_read_dash_ver(tp);
15163 }
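/*
 * The APE (Application Processing Engine) is the management controller
 * on newer parts; its registers sit behind BAR 2 and need the separate
 * locking initialized above before ASF/dash firmware state is read.
 */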
15164
15165 /*
15166 * Reset the chip in case the UNDI or EFI driver did not shut down
15167 * DMA. The DMA self test will enable WDMAC and we'll see (spurious)
15168 * pending DMA on the PCI bus at that point.
15169 */
15170 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15171 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15172 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15173 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15174 }
15175
15176 err = tg3_test_dma(tp);
15177 if (err) {
15178 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15179 goto err_out_apeunmap;
15180 }
15181
15182 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15183 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15184 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15185 for (i = 0; i < tp->irq_max; i++) {
15186 struct tg3_napi *tnapi = &tp->napi[i];
15187
15188 tnapi->tp = tp;
15189 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15190
15191 tnapi->int_mbox = intmbx;
15192 if (i < 4)
15193 intmbx += 0x8;
15194 else
15195 intmbx += 0x4;
15196
15197 tnapi->consmbox = rcvmbx;
15198 tnapi->prodmbox = sndmbx;
15199
15200 if (i)
15201 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15202 else
15203 tnapi->coal_now = HOSTCC_MODE_NOW;
15204
15205 if (!tg3_flag(tp, SUPPORT_MSIX))
15206 break;
15207
15208 /*
15209 * If we support MSIX, we'll be using RSS. If we're using
15210 * RSS, the first vector only handles link interrupts and the
15211 * remaining vectors handle rx and tx interrupts. Reuse the
15212 * mailbox values for the next iteration. The values we set up
15213 * above are still used in single-vector mode.
15214 */
15215 if (!i)
15216 continue;
15217
15218 rcvmbx += 0x8;
15219
15220 if (sndmbx & 0x4)
15221 sndmbx -= 0x4;
15222 else
15223 sndmbx += 0xc;
15224 }
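/*
 * A worked example of the progression above: the interrupt mailbox
 * advances by 0x8 for the first four vectors and by 0x4 thereafter;
 * each RSS return ring's consumer mailbox sits 0x8 past the previous
 * one; and the producer mailbox alternates -0x4/+0xc, advancing one
 * full 64-bit mailbox (0x8) every two vectors while flipping between
 * its low and high halves.
 */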
15225
15226 tg3_init_coal(tp);
15227
15228 pci_set_drvdata(pdev, dev);
15229
15230 err = register_netdev(dev);
15231 if (err) {
15232 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15233 goto err_out_apeunmap;
15234 }
15235
15236 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15237 tp->board_part_number,
15238 tp->pci_chip_rev_id,
15239 tg3_bus_string(tp, str),
15240 dev->dev_addr);
15241
15242 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15243 struct phy_device *phydev;
15244 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15245 netdev_info(dev,
15246 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15247 phydev->drv->name, dev_name(&phydev->dev));
15248 } else {
15249 char *ethtype;
15250
15251 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15252 ethtype = "10/100Base-TX";
15253 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15254 ethtype = "1000Base-SX";
15255 else
15256 ethtype = "10/100/1000Base-T";
15257
15258 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15259 "(WireSpeed[%d], EEE[%d])\n",
15260 tg3_phy_string(tp), ethtype,
15261 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15262 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15263 }
15264
15265 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15266 (dev->features & NETIF_F_RXCSUM) != 0,
15267 tg3_flag(tp, USE_LINKCHG_REG) != 0,
15268 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15269 tg3_flag(tp, ENABLE_ASF) != 0,
15270 tg3_flag(tp, TSO_CAPABLE) != 0);
15271 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15272 tp->dma_rwctrl,
15273 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15274 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15275
15276 pci_save_state(pdev);
15277
15278 return 0;
15279
15280 err_out_apeunmap:
15281 if (tp->aperegs) {
15282 iounmap(tp->aperegs);
15283 tp->aperegs = NULL;
15284 }
15285
15286 err_out_iounmap:
15287 if (tp->regs) {
15288 iounmap(tp->regs);
15289 tp->regs = NULL;
15290 }
15291
15292 err_out_free_dev:
15293 free_netdev(dev);
15294
15295 err_out_free_res:
15296 pci_release_regions(pdev);
15297
15298 err_out_disable_pdev:
15299 pci_disable_device(pdev);
15300 pci_set_drvdata(pdev, NULL);
15301 return err;
15302 }
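/*
 * The error labels above unwind in strict reverse order of acquisition
 * (APE mapping, register mapping, netdev, PCI regions, PCI device): the
 * standard goto-based cleanup pattern for probe routines.
 */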
15303
15304 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15305 {
15306 struct net_device *dev = pci_get_drvdata(pdev);
15307
15308 if (dev) {
15309 struct tg3 *tp = netdev_priv(dev);
15310
15311 if (tp->fw)
15312 release_firmware(tp->fw);
15313
15314 cancel_work_sync(&tp->reset_task);
15315
15316 if (!tg3_flag(tp, USE_PHYLIB)) {
15317 tg3_phy_fini(tp);
15318 tg3_mdio_fini(tp);
15319 }
15320
15321 unregister_netdev(dev);
15322 if (tp->aperegs) {
15323 iounmap(tp->aperegs);
15324 tp->aperegs = NULL;
15325 }
15326 if (tp->regs) {
15327 iounmap(tp->regs);
15328 tp->regs = NULL;
15329 }
15330 free_netdev(dev);
15331 pci_release_regions(pdev);
15332 pci_disable_device(pdev);
15333 pci_set_drvdata(pdev, NULL);
15334 }
15335 }
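/*
 * Removal mirrors the tail of tg3_init_one(): firmware and pending work
 * are dropped first, and the netdev is unregistered before any of the
 * register mappings it might still be using are torn down.
 */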
15336
15337 #ifdef CONFIG_PM_SLEEP
15338 static int tg3_suspend(struct device *device)
15339 {
15340 struct pci_dev *pdev = to_pci_dev(device);
15341 struct net_device *dev = pci_get_drvdata(pdev);
15342 struct tg3 *tp = netdev_priv(dev);
15343 int err;
15344
15345 if (!netif_running(dev))
15346 return 0;
15347
15348 flush_work_sync(&tp->reset_task);
15349 tg3_phy_stop(tp);
15350 tg3_netif_stop(tp);
15351
15352 del_timer_sync(&tp->timer);
15353
15354 tg3_full_lock(tp, 1);
15355 tg3_disable_ints(tp);
15356 tg3_full_unlock(tp);
15357
15358 netif_device_detach(dev);
15359
15360 tg3_full_lock(tp, 0);
15361 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15362 tg3_flag_clear(tp, INIT_COMPLETE);
15363 tg3_full_unlock(tp);
15364
15365 err = tg3_power_down_prepare(tp);
15366 if (err) {
15367 int err2;
15368
15369 tg3_full_lock(tp, 0);
15370
15371 tg3_flag_set(tp, INIT_COMPLETE);
15372 err2 = tg3_restart_hw(tp, 1);
15373 if (err2)
15374 goto out;
15375
15376 tp->timer.expires = jiffies + tp->timer_offset;
15377 add_timer(&tp->timer);
15378
15379 netif_device_attach(dev);
15380 tg3_netif_start(tp);
15381
15382 out:
15383 tg3_full_unlock(tp);
15384
15385 if (!err2)
15386 tg3_phy_start(tp);
15387 }
15388
15389 return err;
15390 }
15391
15392 static int tg3_resume(struct device *device)
15393 {
15394 struct pci_dev *pdev = to_pci_dev(device);
15395 struct net_device *dev = pci_get_drvdata(pdev);
15396 struct tg3 *tp = netdev_priv(dev);
15397 int err;
15398
15399 if (!netif_running(dev))
15400 return 0;
15401
15402 netif_device_attach(dev);
15403
15404 tg3_full_lock(tp, 0);
15405
15406 tg3_flag_set(tp, INIT_COMPLETE);
15407 err = tg3_restart_hw(tp, 1);
15408 if (err)
15409 goto out;
15410
15411 tp->timer.expires = jiffies + tp->timer_offset;
15412 add_timer(&tp->timer);
15413
15414 tg3_netif_start(tp);
15415
15416 out:
15417 tg3_full_unlock(tp);
15418
15419 if (!err)
15420 tg3_phy_start(tp);
15421
15422 return err;
15423 }
15424
15425 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15426 #define TG3_PM_OPS (&tg3_pm_ops)
15427
15428 #else
15429
15430 #define TG3_PM_OPS NULL
15431
15432 #endif /* CONFIG_PM_SLEEP */
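/*
 * SIMPLE_DEV_PM_OPS wires tg3_suspend/tg3_resume into all of the system
 * sleep callbacks (suspend/resume, freeze/thaw, poweroff/restore), so a
 * single pair of handlers covers both suspend and hibernation.
 */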
15433
15434 /**
15435 * tg3_io_error_detected - called when PCI error is detected
15436 * @pdev: Pointer to PCI device
15437 * @state: The current pci connection state
15438 *
15439 * This function is called after a PCI bus error affecting
15440 * this device has been detected.
15441 */
15442 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15443 pci_channel_state_t state)
15444 {
15445 struct net_device *netdev = pci_get_drvdata(pdev);
15446 struct tg3 *tp = netdev_priv(netdev);
15447 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15448
15449 netdev_info(netdev, "PCI I/O error detected\n");
15450
15451 rtnl_lock();
15452
15453 if (!netif_running(netdev))
15454 goto done;
15455
15456 tg3_phy_stop(tp);
15457
15458 tg3_netif_stop(tp);
15459
15460 del_timer_sync(&tp->timer);
15461 tg3_flag_clear(tp, RESTART_TIMER);
15462
15463 /* Want to make sure that the reset task doesn't run */
15464 cancel_work_sync(&tp->reset_task);
15465 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15466 tg3_flag_clear(tp, RESTART_TIMER);
15467
15468 netif_device_detach(netdev);
15469
15470 /* Clean up software state, even if MMIO is blocked */
15471 tg3_full_lock(tp, 0);
15472 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15473 tg3_full_unlock(tp);
15474
15475 done:
15476 if (state == pci_channel_io_perm_failure)
15477 err = PCI_ERS_RESULT_DISCONNECT;
15478 else
15479 pci_disable_device(pdev);
15480
15481 rtnl_unlock();
15482
15483 return err;
15484 }
15485
15486 /**
15487 * tg3_io_slot_reset - called after the PCI bus has been reset.
15488 * @pdev: Pointer to PCI device
15489 *
15490 * Restart the card from scratch, as if from a cold-boot.
15491 * At this point, the card has experienced a hard reset,
15492 * followed by fixups by BIOS, and has its config space
15493 * set up identically to what it was at cold boot.
15494 */
15495 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15496 {
15497 struct net_device *netdev = pci_get_drvdata(pdev);
15498 struct tg3 *tp = netdev_priv(netdev);
15499 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15500 int err;
15501
15502 rtnl_lock();
15503
15504 if (pci_enable_device(pdev)) {
15505 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15506 goto done;
15507 }
15508
15509 pci_set_master(pdev);
15510 pci_restore_state(pdev);
15511 pci_save_state(pdev);
15512
15513 if (!netif_running(netdev)) {
15514 rc = PCI_ERS_RESULT_RECOVERED;
15515 goto done;
15516 }
15517
15518 err = tg3_power_up(tp);
15519 if (err) {
15520 netdev_err(netdev, "Failed to restore register access.\n");
15521 goto done;
15522 }
15523
15524 rc = PCI_ERS_RESULT_RECOVERED;
15525
15526 done:
15527 rtnl_unlock();
15528
15529 return rc;
15530 }
15531
15532 /**
15533 * tg3_io_resume - called when traffic can start flowing again.
15534 * @pdev: Pointer to PCI device
15535 *
15536 * This callback is called when the error recovery driver tells
15537 * us that it's OK to resume normal operation.
15538 */
15539 static void tg3_io_resume(struct pci_dev *pdev)
15540 {
15541 struct net_device *netdev = pci_get_drvdata(pdev);
15542 struct tg3 *tp = netdev_priv(netdev);
15543 int err;
15544
15545 rtnl_lock();
15546
15547 if (!netif_running(netdev))
15548 goto done;
15549
15550 tg3_full_lock(tp, 0);
15551 tg3_flag_set(tp, INIT_COMPLETE);
15552 err = tg3_restart_hw(tp, 1);
15553 tg3_full_unlock(tp);
15554 if (err) {
15555 netdev_err(netdev, "Cannot restart hardware after reset.\n");
15556 goto done;
15557 }
15558
15559 netif_device_attach(netdev);
15560
15561 tp->timer.expires = jiffies + tp->timer_offset;
15562 add_timer(&tp->timer);
15563
15564 tg3_netif_start(tp);
15565
15566 tg3_phy_start(tp);
15567
15568 done:
15569 rtnl_unlock();
15570 }
15571
15572 static struct pci_error_handlers tg3_err_handler = {
15573 .error_detected = tg3_io_error_detected,
15574 .slot_reset = tg3_io_slot_reset,
15575 .resume = tg3_io_resume
15576 };
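/*
 * The PCI core drives these callbacks in order during AER recovery:
 * error_detected() quiesces the device, slot_reset() re-enables it
 * after the bus reset, and resume() restarts hardware and traffic.
 */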
15577
15578 static struct pci_driver tg3_driver = {
15579 .name = DRV_MODULE_NAME,
15580 .id_table = tg3_pci_tbl,
15581 .probe = tg3_init_one,
15582 .remove = __devexit_p(tg3_remove_one),
15583 .err_handler = &tg3_err_handler,
15584 .driver.pm = TG3_PM_OPS,
15585 };
15586
15587 static int __init tg3_init(void)
15588 {
15589 return pci_register_driver(&tg3_driver);
15590 }
15591
15592 static void __exit tg3_cleanup(void)
15593 {
15594 pci_unregister_driver(&tg3_driver);
15595 }
15596
15597 module_init(tg3_init);
15598 module_exit(tg3_cleanup);
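/*
 * Example invocation (hypothetical value): "modprobe tg3 tg3_debug=0x7fff"
 * loads the driver with a verbose message level via the tg3_debug
 * module parameter consumed in tg3_init_one() above.
 */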