/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>

#include <asm/system.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#endif
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)		\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)		\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
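
/*
 * Example (illustrative only): the three wrappers above are how the rest of
 * this file tests and updates feature bits, e.g.
 *
 *	if (tg3_flag(tp, JUMBO_CAPABLE))
 *		tg3_flag_set(tp, TSO_CAPABLE);
 *	else
 *		tg3_flag_clear(tp, TSO_CAPABLE);
 *
 * JUMBO_CAPABLE and TSO_CAPABLE stand in for any TG3_FLAG_* enum value; the
 * pairing shown is not a rule taken from the driver.
 */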
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			119
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"May 18, 2011"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128
/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
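
/*
 * Illustration of the comment above: because TG3_TX_RING_SIZE is a
 * compile-time power of two, NEXT_TX() wraps with a mask rather than a
 * modulo, e.g. NEXT_TX(510) == 511 and NEXT_TX(511) == (512 & 511) == 0,
 * which is the same result as (i + 1) % TG3_TX_RING_SIZE.
 */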
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif
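
/*
 * Sketch of how the threshold is consumed on the receive path (the real
 * check lives in the rx handler; "len" is illustrative, not a quote):
 *
 *	if (len < TG3_RX_COPY_THRESH(tp)) {
 *		// copy the small frame into a fresh skb and recycle the
 *		// original buffer, which also covers the 5701 alignment
 *		// workaround described above
 *	} else {
 *		// unmap the buffer and pass it up the stack directly
 *	}
 */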
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
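
/*
 * Illustrative use of the wakeup threshold (names are assumptions; the
 * actual check is made in the tx completion path):
 *
 *	if (netif_tx_queue_stopped(txq) &&
 *	    tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
 *		netif_tx_wake_queue(txq);
 */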
#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_flow_control" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test (online) " },
	{ "link test (online) " },
	{ "register test (offline)" },
	{ "memory test (offline)" },
	{ "loopback test (offline)" },
	{ "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;

	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
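
/*
 * Example (illustrative): each accessor above expects a local "tp" variable
 * in the calling function, so register accesses elsewhere in this file read
 * like plain register operations:
 *
 *	val = tr32(GRC_MISC_CFG);
 *	tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
 */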
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
}
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
}
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
/* similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
#define PHY_BUSY_LOOPS	5000
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}
static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}
static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}
static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}
#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB);
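
/*
 * The two macros above bracket DSP accesses; a typical sequence, matching
 * their use later in this file (e.g. tg3_phy_eee_enable()), is:
 *
 *	if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
 *		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0003);
 *		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
 *	}
 */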
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}
static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK  |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}
static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
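
/*
 * Worked example of the resolution above (illustrative): if the local side
 * advertises both 1000XPAUSE and 1000XPSE_ASYM while the link partner
 * advertises only LPA_1000XPAUSE_ASYM, the function returns FLOW_CTRL_RX --
 * we may honor received pause frames but will not send our own.
 */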
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}
static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0003);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
		if (err)
			return err;

		/* Block the PHY control access.  */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);

	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					     val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
static void tg3_frob_aux_power(struct tg3 *tp)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) &&
	    tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if (tg3_flag(tp_peer, WOL_ENABLE) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if (tg3_flag(tp, WOL_ENABLE) || tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
			   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
			/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
			u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
					     GRC_LCLCTRL_GPIO_OE1 |
					     GRC_LCLCTRL_GPIO_OE2 |
					     GRC_LCLCTRL_GPIO_OUTPUT0 |
					     GRC_LCLCTRL_GPIO_OUTPUT1 |
					     tp->grc_local_ctrl;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				   NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					  GRC_LCLCTRL_GPIO_OE1 |
					  GRC_LCLCTRL_GPIO_OE2 |
					  GRC_LCLCTRL_GPIO_OUTPUT1 |
					  GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}

static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
		return 1;
	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
		if (speed != SPEED_10)
			return 1;
	} else if (speed == SPEED_10)
		return 1;

	return 0;
}

static int tg3_setup_phy(struct tg3 *, int);

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

static void tg3_write_sig_post_reset(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);

static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}

/* tp->lock is held. */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}

/* tp->lock is held. */
static void tg3_nvram_unlock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		if (tp->nvram_lock_cnt > 0)
			tp->nvram_lock_cnt--;
		if (tp->nvram_lock_cnt == 0)
			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
	}
}

/* tp->lock is held. */
static void tg3_enable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
	}
}

/* tp->lock is held. */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
	}
}

static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}

#define NVRAM_CMD_TIMEOUT 10000

static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		udelay(10);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}

static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr / tp->nvram_pagesize) <<
			ATMEL_AT45DB0X1B_PAGE_POS) +
		       (addr % tp->nvram_pagesize);

	return addr;
}

static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
			tp->nvram_pagesize) +
		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));

	return addr;
}

/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}

/* Ensures NVRAM data is in bytestream format. */
static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
{
	u32 v;
	int res = tg3_nvram_read(tp, offset, &v);
	if (!res)
		*val = cpu_to_be32(v);
	return res;
}

/* tp->lock is held. */
static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
{
	u32 addr_high, addr_low;
	int i;

	addr_high = ((tp->dev->dev_addr[0] << 8) |
		     tp->dev->dev_addr[1]);
	addr_low = ((tp->dev->dev_addr[2] << 24) |
		    (tp->dev->dev_addr[3] << 16) |
		    (tp->dev->dev_addr[4] <<  8) |
		    (tp->dev->dev_addr[5] <<  0));
	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		for (i = 0; i < 12; i++) {
			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
		}
	}

	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}

static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}

static int tg3_power_up(struct tg3 *tp)
{
	tg3_enable_register_access(tp);

	pci_set_power_state(tp->pdev, PCI_D0);

	/* Switch out of Vaux if it is a NIC */
	if (tg3_flag(tp, IS_NIC))
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

	return 0;
}
2634 static int tg3_power_down_prepare(struct tg3
*tp
)
2637 bool device_should_wake
, do_low_power
;
2639 tg3_enable_register_access(tp
);
2641 /* Restore the CLKREQ setting. */
2642 if (tg3_flag(tp
, CLKREQ_BUG
)) {
2645 pci_read_config_word(tp
->pdev
,
2646 tp
->pcie_cap
+ PCI_EXP_LNKCTL
,
2648 lnkctl
|= PCI_EXP_LNKCTL_CLKREQ_EN
;
2649 pci_write_config_word(tp
->pdev
,
2650 tp
->pcie_cap
+ PCI_EXP_LNKCTL
,
2654 misc_host_ctrl
= tr32(TG3PCI_MISC_HOST_CTRL
);
2655 tw32(TG3PCI_MISC_HOST_CTRL
,
2656 misc_host_ctrl
| MISC_HOST_CTRL_MASK_PCI_INT
);
2658 device_should_wake
= device_may_wakeup(&tp
->pdev
->dev
) &&
2659 tg3_flag(tp
, WOL_ENABLE
);
2661 if (tg3_flag(tp
, USE_PHYLIB
)) {
2662 do_low_power
= false;
2663 if ((tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
) &&
2664 !(tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)) {
2665 struct phy_device
*phydev
;
2666 u32 phyid
, advertising
;
2668 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
2670 tp
->phy_flags
|= TG3_PHYFLG_IS_LOW_POWER
;
2672 tp
->link_config
.orig_speed
= phydev
->speed
;
2673 tp
->link_config
.orig_duplex
= phydev
->duplex
;
2674 tp
->link_config
.orig_autoneg
= phydev
->autoneg
;
2675 tp
->link_config
.orig_advertising
= phydev
->advertising
;
2677 advertising
= ADVERTISED_TP
|
2679 ADVERTISED_Autoneg
|
2680 ADVERTISED_10baseT_Half
;
2682 if (tg3_flag(tp
, ENABLE_ASF
) || device_should_wake
) {
2683 if (tg3_flag(tp
, WOL_SPEED_100MB
))
2685 ADVERTISED_100baseT_Half
|
2686 ADVERTISED_100baseT_Full
|
2687 ADVERTISED_10baseT_Full
;
2689 advertising
|= ADVERTISED_10baseT_Full
;
2692 phydev
->advertising
= advertising
;
2694 phy_start_aneg(phydev
);
2696 phyid
= phydev
->drv
->phy_id
& phydev
->drv
->phy_id_mask
;
2697 if (phyid
!= PHY_ID_BCMAC131
) {
2698 phyid
&= PHY_BCM_OUI_MASK
;
2699 if (phyid
== PHY_BCM_OUI_1
||
2700 phyid
== PHY_BCM_OUI_2
||
2701 phyid
== PHY_BCM_OUI_3
)
2702 do_low_power
= true;
2706 do_low_power
= true;
2708 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)) {
2709 tp
->phy_flags
|= TG3_PHYFLG_IS_LOW_POWER
;
2710 tp
->link_config
.orig_speed
= tp
->link_config
.speed
;
2711 tp
->link_config
.orig_duplex
= tp
->link_config
.duplex
;
2712 tp
->link_config
.orig_autoneg
= tp
->link_config
.autoneg
;
2715 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)) {
2716 tp
->link_config
.speed
= SPEED_10
;
2717 tp
->link_config
.duplex
= DUPLEX_HALF
;
2718 tp
->link_config
.autoneg
= AUTONEG_ENABLE
;
2719 tg3_setup_phy(tp
, 0);
2723 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
2726 val
= tr32(GRC_VCPU_EXT_CTRL
);
2727 tw32(GRC_VCPU_EXT_CTRL
, val
| GRC_VCPU_EXT_CTRL_DISABLE_WOL
);
2728 } else if (!tg3_flag(tp
, ENABLE_ASF
)) {
2732 for (i
= 0; i
< 200; i
++) {
2733 tg3_read_mem(tp
, NIC_SRAM_FW_ASF_STATUS_MBOX
, &val
);
2734 if (val
== ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1
)
2739 if (tg3_flag(tp
, WOL_CAP
))
2740 tg3_write_mem(tp
, NIC_SRAM_WOL_MBOX
, WOL_SIGNATURE
|
2741 WOL_DRV_STATE_SHUTDOWN
|
2745 if (device_should_wake
) {
2748 if (!(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)) {
2750 !(tp
->phy_flags
& TG3_PHYFLG_IS_FET
)) {
2751 tg3_phy_auxctl_write(tp
,
2752 MII_TG3_AUXCTL_SHDWSEL_PWRCTL
,
2753 MII_TG3_AUXCTL_PCTL_WOL_EN
|
2754 MII_TG3_AUXCTL_PCTL_100TX_LPWR
|
2755 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC
);
2759 if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)
2760 mac_mode
= MAC_MODE_PORT_MODE_GMII
;
2762 mac_mode
= MAC_MODE_PORT_MODE_MII
;
2764 mac_mode
|= tp
->mac_mode
& MAC_MODE_LINK_POLARITY
;
2765 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
2767 u32 speed
= tg3_flag(tp
, WOL_SPEED_100MB
) ?
2768 SPEED_100
: SPEED_10
;
2769 if (tg3_5700_link_polarity(tp
, speed
))
2770 mac_mode
|= MAC_MODE_LINK_POLARITY
;
2772 mac_mode
&= ~MAC_MODE_LINK_POLARITY
;
2775 mac_mode
= MAC_MODE_PORT_MODE_TBI
;
2778 if (!tg3_flag(tp
, 5750_PLUS
))
2779 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
2781 mac_mode
|= MAC_MODE_MAGIC_PKT_ENABLE
;
2782 if ((tg3_flag(tp
, 5705_PLUS
) && !tg3_flag(tp
, 5780_CLASS
)) &&
2783 (tg3_flag(tp
, ENABLE_ASF
) || tg3_flag(tp
, ENABLE_APE
)))
2784 mac_mode
|= MAC_MODE_KEEP_FRAME_IN_WOL
;
2786 if (tg3_flag(tp
, ENABLE_APE
))
2787 mac_mode
|= MAC_MODE_APE_TX_EN
|
2788 MAC_MODE_APE_RX_EN
|
2789 MAC_MODE_TDE_ENABLE
;
2791 tw32_f(MAC_MODE
, mac_mode
);
2794 tw32_f(MAC_RX_MODE
, RX_MODE_ENABLE
);
2798 if (!tg3_flag(tp
, WOL_SPEED_100MB
) &&
2799 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
2800 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
)) {
2803 base_val
= tp
->pci_clock_ctrl
;
2804 base_val
|= (CLOCK_CTRL_RXCLK_DISABLE
|
2805 CLOCK_CTRL_TXCLK_DISABLE
);
2807 tw32_wait_f(TG3PCI_CLOCK_CTRL
, base_val
| CLOCK_CTRL_ALTCLK
|
2808 CLOCK_CTRL_PWRDOWN_PLL133
, 40);
2809 } else if (tg3_flag(tp
, 5780_CLASS
) ||
2810 tg3_flag(tp
, CPMU_PRESENT
) ||
2811 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
2813 } else if (!(tg3_flag(tp
, 5750_PLUS
) && tg3_flag(tp
, ENABLE_ASF
))) {
2814 u32 newbits1
, newbits2
;
2816 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
2817 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
2818 newbits1
= (CLOCK_CTRL_RXCLK_DISABLE
|
2819 CLOCK_CTRL_TXCLK_DISABLE
|
2821 newbits2
= newbits1
| CLOCK_CTRL_44MHZ_CORE
;
2822 } else if (tg3_flag(tp
, 5705_PLUS
)) {
2823 newbits1
= CLOCK_CTRL_625_CORE
;
2824 newbits2
= newbits1
| CLOCK_CTRL_ALTCLK
;
2826 newbits1
= CLOCK_CTRL_ALTCLK
;
2827 newbits2
= newbits1
| CLOCK_CTRL_44MHZ_CORE
;
2830 tw32_wait_f(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
| newbits1
,
2833 tw32_wait_f(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
| newbits2
,
2836 if (!tg3_flag(tp
, 5705_PLUS
)) {
2839 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
2840 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
2841 newbits3
= (CLOCK_CTRL_RXCLK_DISABLE
|
2842 CLOCK_CTRL_TXCLK_DISABLE
|
2843 CLOCK_CTRL_44MHZ_CORE
);
2845 newbits3
= CLOCK_CTRL_44MHZ_CORE
;
2848 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
2849 tp
->pci_clock_ctrl
| newbits3
, 40);
2853 if (!(device_should_wake
) && !tg3_flag(tp
, ENABLE_ASF
))
2854 tg3_power_down_phy(tp
, do_low_power
);
2856 tg3_frob_aux_power(tp
);
2858 /* Workaround for unstable PLL clock */
2859 if ((GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5750_AX
) ||
2860 (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5750_BX
)) {
2861 u32 val
= tr32(0x7d00);
2863 val
&= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2865 if (!tg3_flag(tp
, ENABLE_ASF
)) {
2868 err
= tg3_nvram_lock(tp
);
2869 tg3_halt_cpu(tp
, RX_CPU_BASE
);
2871 tg3_nvram_unlock(tp
);
2875 tg3_write_sig_post_reset(tp
, RESET_KIND_SHUTDOWN
);

static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}

static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val,
					 u16 *speed, u8 *duplex)
{
	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
	case MII_TG3_AUX_STAT_10HALF:
		*speed = SPEED_10;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_10FULL:
		*speed = SPEED_10;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_100HALF:
		*speed = SPEED_100;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_100FULL:
		*speed = SPEED_100;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_1000HALF:
		*speed = SPEED_1000;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_1000FULL:
		*speed = SPEED_1000;
		*duplex = DUPLEX_FULL;
		break;

	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
				 SPEED_10;
			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
				  DUPLEX_HALF;
			break;
		}
		*speed = SPEED_INVALID;
		*duplex = DUPLEX_INVALID;
		break;
	}
}
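
/* Readability note (added, not in the original source): the decoder above
 * is driven by tg3_setup_copper_phy(), which polls MII_TG3_AUX_STAT until
 * the speed/duplex field becomes non-zero and then translates it into the
 * generic SPEED_xxx and DUPLEX_xxx values stored in tp->link_config.
 */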
2935 static int tg3_phy_autoneg_cfg(struct tg3
*tp
, u32 advertise
, u32 flowctrl
)
2940 new_adv
= ADVERTISE_CSMA
;
2941 if (advertise
& ADVERTISED_10baseT_Half
)
2942 new_adv
|= ADVERTISE_10HALF
;
2943 if (advertise
& ADVERTISED_10baseT_Full
)
2944 new_adv
|= ADVERTISE_10FULL
;
2945 if (advertise
& ADVERTISED_100baseT_Half
)
2946 new_adv
|= ADVERTISE_100HALF
;
2947 if (advertise
& ADVERTISED_100baseT_Full
)
2948 new_adv
|= ADVERTISE_100FULL
;
2950 new_adv
|= tg3_advert_flowctrl_1000T(flowctrl
);
2952 err
= tg3_writephy(tp
, MII_ADVERTISE
, new_adv
);
2956 if (tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)
2960 if (advertise
& ADVERTISED_1000baseT_Half
)
2961 new_adv
|= MII_TG3_CTRL_ADV_1000_HALF
;
2962 if (advertise
& ADVERTISED_1000baseT_Full
)
2963 new_adv
|= MII_TG3_CTRL_ADV_1000_FULL
;
2965 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
||
2966 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B0
)
2967 new_adv
|= (MII_TG3_CTRL_AS_MASTER
|
2968 MII_TG3_CTRL_ENABLE_AS_MASTER
);
2970 err
= tg3_writephy(tp
, MII_TG3_CTRL
, new_adv
);
2974 if (!(tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
))
2977 tw32(TG3_CPMU_EEE_MODE
,
2978 tr32(TG3_CPMU_EEE_MODE
) & ~TG3_CPMU_EEEMD_LPI_ENABLE
);
2980 err
= TG3_PHY_AUXCTL_SMDSP_ENABLE(tp
);
2984 switch (GET_ASIC_REV(tp
->pci_chip_rev_id
)) {
2986 case ASIC_REV_57765
:
2987 if (!tg3_phydsp_read(tp
, MII_TG3_DSP_CH34TP2
, &val
))
2988 tg3_phydsp_write(tp
, MII_TG3_DSP_CH34TP2
, val
|
2989 MII_TG3_DSP_CH34TP2_HIBW01
);
2992 val
= MII_TG3_DSP_TAP26_ALNOKO
|
2993 MII_TG3_DSP_TAP26_RMRXSTO
|
2994 MII_TG3_DSP_TAP26_OPCSINPT
;
2995 tg3_phydsp_write(tp
, MII_TG3_DSP_TAP26
, val
);
2999 /* Advertise 100-BaseTX EEE ability */
3000 if (advertise
& ADVERTISED_100baseT_Full
)
3001 val
|= MDIO_AN_EEE_ADV_100TX
;
3002 /* Advertise 1000-BaseT EEE ability */
3003 if (advertise
& ADVERTISED_1000baseT_Full
)
3004 val
|= MDIO_AN_EEE_ADV_1000T
;
3005 err
= tg3_phy_cl45_write(tp
, MDIO_MMD_AN
, MDIO_AN_EEE_ADV
, val
);
3007 err2
= TG3_PHY_AUXCTL_SMDSP_DISABLE(tp
);
3016 static void tg3_phy_copper_begin(struct tg3
*tp
)
3021 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
) {
3022 new_adv
= ADVERTISED_10baseT_Half
|
3023 ADVERTISED_10baseT_Full
;
3024 if (tg3_flag(tp
, WOL_SPEED_100MB
))
3025 new_adv
|= ADVERTISED_100baseT_Half
|
3026 ADVERTISED_100baseT_Full
;
3028 tg3_phy_autoneg_cfg(tp
, new_adv
,
3029 FLOW_CTRL_TX
| FLOW_CTRL_RX
);
3030 } else if (tp
->link_config
.speed
== SPEED_INVALID
) {
3031 if (tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)
3032 tp
->link_config
.advertising
&=
3033 ~(ADVERTISED_1000baseT_Half
|
3034 ADVERTISED_1000baseT_Full
);
3036 tg3_phy_autoneg_cfg(tp
, tp
->link_config
.advertising
,
3037 tp
->link_config
.flowctrl
);
3039 /* Asking for a specific link mode. */
3040 if (tp
->link_config
.speed
== SPEED_1000
) {
3041 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
3042 new_adv
= ADVERTISED_1000baseT_Full
;
3044 new_adv
= ADVERTISED_1000baseT_Half
;
3045 } else if (tp
->link_config
.speed
== SPEED_100
) {
3046 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
3047 new_adv
= ADVERTISED_100baseT_Full
;
3049 new_adv
= ADVERTISED_100baseT_Half
;
3051 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
3052 new_adv
= ADVERTISED_10baseT_Full
;
3054 new_adv
= ADVERTISED_10baseT_Half
;
3057 tg3_phy_autoneg_cfg(tp
, new_adv
,
3058 tp
->link_config
.flowctrl
);
3061 if (tp
->link_config
.autoneg
== AUTONEG_DISABLE
&&
3062 tp
->link_config
.speed
!= SPEED_INVALID
) {
3063 u32 bmcr
, orig_bmcr
;
3065 tp
->link_config
.active_speed
= tp
->link_config
.speed
;
3066 tp
->link_config
.active_duplex
= tp
->link_config
.duplex
;
3069 switch (tp
->link_config
.speed
) {
3075 bmcr
|= BMCR_SPEED100
;
3079 bmcr
|= TG3_BMCR_SPEED1000
;
3083 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
3084 bmcr
|= BMCR_FULLDPLX
;
3086 if (!tg3_readphy(tp
, MII_BMCR
, &orig_bmcr
) &&
3087 (bmcr
!= orig_bmcr
)) {
3088 tg3_writephy(tp
, MII_BMCR
, BMCR_LOOPBACK
);
3089 for (i
= 0; i
< 1500; i
++) {
3093 if (tg3_readphy(tp
, MII_BMSR
, &tmp
) ||
3094 tg3_readphy(tp
, MII_BMSR
, &tmp
))
3096 if (!(tmp
& BMSR_LSTATUS
)) {
3101 tg3_writephy(tp
, MII_BMCR
, bmcr
);
3105 tg3_writephy(tp
, MII_BMCR
,
3106 BMCR_ANENABLE
| BMCR_ANRESTART
);

static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);

	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);

	udelay(40);

	return err;
}

static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
{
	u32 adv_reg, all_mask = 0;

	if (mask & ADVERTISED_10baseT_Half)
		all_mask |= ADVERTISE_10HALF;
	if (mask & ADVERTISED_10baseT_Full)
		all_mask |= ADVERTISE_10FULL;
	if (mask & ADVERTISED_100baseT_Half)
		all_mask |= ADVERTISE_100HALF;
	if (mask & ADVERTISED_100baseT_Full)
		all_mask |= ADVERTISE_100FULL;

	if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
		return 0;

	if ((adv_reg & all_mask) != all_mask)
		return 0;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 tg3_ctrl;

		all_mask = 0;
		if (mask & ADVERTISED_1000baseT_Half)
			all_mask |= ADVERTISE_1000HALF;
		if (mask & ADVERTISED_1000baseT_Full)
			all_mask |= ADVERTISE_1000FULL;

		if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
			return 0;

		if ((tg3_ctrl & all_mask) != all_mask)
			return 0;
	}
	return 1;
}

static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv,
				     u32 *rmtadv)
{
	u32 curadv, reqadv;

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return 1;

	curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
	reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		if (curadv != reqadv)
			return 0;

		if (tg3_flag(tp, PAUSE_AUTONEG))
			tg3_readphy(tp, MII_LPA, rmtadv);
	} else {
		/* Reprogram the advertisement register, even if it
		 * does not affect the current link.  If the link
		 * gets renegotiated in the future, we can save an
		 * additional renegotiation cycle by advertising
		 * it correctly in the first place.
		 */
		if (curadv != reqadv) {
			*lcladv &= ~(ADVERTISE_PAUSE_CAP |
				     ADVERTISE_PAUSE_ASYM);
			tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
		}
	}

	return 1;
}
3198 static int tg3_setup_copper_phy(struct tg3
*tp
, int force_reset
)
3200 int current_link_up
;
3202 u32 lcl_adv
, rmt_adv
;
3210 (MAC_STATUS_SYNC_CHANGED
|
3211 MAC_STATUS_CFG_CHANGED
|
3212 MAC_STATUS_MI_COMPLETION
|
3213 MAC_STATUS_LNKSTATE_CHANGED
));
3216 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
3218 (tp
->mi_mode
& ~MAC_MI_MODE_AUTO_POLL
));
3222 tg3_phy_auxctl_write(tp
, MII_TG3_AUXCTL_SHDWSEL_PWRCTL
, 0);
3224 /* Some third-party PHYs need to be reset on link going
3227 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
3228 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
||
3229 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) &&
3230 netif_carrier_ok(tp
->dev
)) {
3231 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
3232 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
3233 !(bmsr
& BMSR_LSTATUS
))
3239 if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5401
) {
3240 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
3241 if (tg3_readphy(tp
, MII_BMSR
, &bmsr
) ||
3242 !tg3_flag(tp
, INIT_COMPLETE
))
3245 if (!(bmsr
& BMSR_LSTATUS
)) {
3246 err
= tg3_init_5401phy_dsp(tp
);
3250 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
3251 for (i
= 0; i
< 1000; i
++) {
3253 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
3254 (bmsr
& BMSR_LSTATUS
)) {
3260 if ((tp
->phy_id
& TG3_PHY_ID_REV_MASK
) ==
3261 TG3_PHY_REV_BCM5401_B0
&&
3262 !(bmsr
& BMSR_LSTATUS
) &&
3263 tp
->link_config
.active_speed
== SPEED_1000
) {
3264 err
= tg3_phy_reset(tp
);
3266 err
= tg3_init_5401phy_dsp(tp
);
3271 } else if (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
||
3272 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B0
) {
3273 /* 5701 {A0,B0} CRC bug workaround */
3274 tg3_writephy(tp
, 0x15, 0x0a75);
3275 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x8c68);
3276 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x8d68);
3277 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x8c68);
3280 /* Clear pending interrupts... */
3281 tg3_readphy(tp
, MII_TG3_ISTAT
, &val
);
3282 tg3_readphy(tp
, MII_TG3_ISTAT
, &val
);
3284 if (tp
->phy_flags
& TG3_PHYFLG_USE_MI_INTERRUPT
)
3285 tg3_writephy(tp
, MII_TG3_IMASK
, ~MII_TG3_INT_LINKCHG
);
3286 else if (!(tp
->phy_flags
& TG3_PHYFLG_IS_FET
))
3287 tg3_writephy(tp
, MII_TG3_IMASK
, ~0);
3289 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
3290 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
3291 if (tp
->led_ctrl
== LED_CTRL_MODE_PHY_1
)
3292 tg3_writephy(tp
, MII_TG3_EXT_CTRL
,
3293 MII_TG3_EXT_CTRL_LNK3_LED_MODE
);
3295 tg3_writephy(tp
, MII_TG3_EXT_CTRL
, 0);
3298 current_link_up
= 0;
3299 current_speed
= SPEED_INVALID
;
3300 current_duplex
= DUPLEX_INVALID
;
3302 if (tp
->phy_flags
& TG3_PHYFLG_CAPACITIVE_COUPLING
) {
3303 err
= tg3_phy_auxctl_read(tp
,
3304 MII_TG3_AUXCTL_SHDWSEL_MISCTEST
,
3306 if (!err
&& !(val
& (1 << 10))) {
3307 tg3_phy_auxctl_write(tp
,
3308 MII_TG3_AUXCTL_SHDWSEL_MISCTEST
,
3315 for (i
= 0; i
< 100; i
++) {
3316 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
3317 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
3318 (bmsr
& BMSR_LSTATUS
))
3323 if (bmsr
& BMSR_LSTATUS
) {
3326 tg3_readphy(tp
, MII_TG3_AUX_STAT
, &aux_stat
);
3327 for (i
= 0; i
< 2000; i
++) {
3329 if (!tg3_readphy(tp
, MII_TG3_AUX_STAT
, &aux_stat
) &&
3334 tg3_aux_stat_to_speed_duplex(tp
, aux_stat
,
3339 for (i
= 0; i
< 200; i
++) {
3340 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
3341 if (tg3_readphy(tp
, MII_BMCR
, &bmcr
))
3343 if (bmcr
&& bmcr
!= 0x7fff)
3351 tp
->link_config
.active_speed
= current_speed
;
3352 tp
->link_config
.active_duplex
= current_duplex
;
3354 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
3355 if ((bmcr
& BMCR_ANENABLE
) &&
3356 tg3_copper_is_advertising_all(tp
,
3357 tp
->link_config
.advertising
)) {
3358 if (tg3_adv_1000T_flowctrl_ok(tp
, &lcl_adv
,
3360 current_link_up
= 1;
3363 if (!(bmcr
& BMCR_ANENABLE
) &&
3364 tp
->link_config
.speed
== current_speed
&&
3365 tp
->link_config
.duplex
== current_duplex
&&
3366 tp
->link_config
.flowctrl
==
3367 tp
->link_config
.active_flowctrl
) {
3368 current_link_up
= 1;
3372 if (current_link_up
== 1 &&
3373 tp
->link_config
.active_duplex
== DUPLEX_FULL
)
3374 tg3_setup_flow_control(tp
, lcl_adv
, rmt_adv
);
3378 if (current_link_up
== 0 || (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)) {
3379 tg3_phy_copper_begin(tp
);
3381 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
3382 if ((!tg3_readphy(tp
, MII_BMSR
, &bmsr
) && (bmsr
& BMSR_LSTATUS
)) ||
3383 (tp
->mac_mode
& MAC_MODE_PORT_INT_LPBACK
))
3384 current_link_up
= 1;
3387 tp
->mac_mode
&= ~MAC_MODE_PORT_MODE_MASK
;
3388 if (current_link_up
== 1) {
3389 if (tp
->link_config
.active_speed
== SPEED_100
||
3390 tp
->link_config
.active_speed
== SPEED_10
)
3391 tp
->mac_mode
|= MAC_MODE_PORT_MODE_MII
;
3393 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
3394 } else if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
)
3395 tp
->mac_mode
|= MAC_MODE_PORT_MODE_MII
;
3397 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
3399 tp
->mac_mode
&= ~MAC_MODE_HALF_DUPLEX
;
3400 if (tp
->link_config
.active_duplex
== DUPLEX_HALF
)
3401 tp
->mac_mode
|= MAC_MODE_HALF_DUPLEX
;
3403 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
) {
3404 if (current_link_up
== 1 &&
3405 tg3_5700_link_polarity(tp
, tp
->link_config
.active_speed
))
3406 tp
->mac_mode
|= MAC_MODE_LINK_POLARITY
;
3408 tp
->mac_mode
&= ~MAC_MODE_LINK_POLARITY
;
3411 /* ??? Without this setting Netgear GA302T PHY does not
3412 * ??? send/receive packets...
3414 if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5411
&&
3415 tp
->pci_chip_rev_id
== CHIPREV_ID_5700_ALTIMA
) {
3416 tp
->mi_mode
|= MAC_MI_MODE_AUTO_POLL
;
3417 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
3421 tw32_f(MAC_MODE
, tp
->mac_mode
);
3424 tg3_phy_eee_adjust(tp
, current_link_up
);
3426 if (tg3_flag(tp
, USE_LINKCHG_REG
)) {
3427 /* Polled via timer. */
3428 tw32_f(MAC_EVENT
, 0);
3430 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
3434 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
&&
3435 current_link_up
== 1 &&
3436 tp
->link_config
.active_speed
== SPEED_1000
&&
3437 (tg3_flag(tp
, PCIX_MODE
) || tg3_flag(tp
, PCI_HIGH_SPEED
))) {
3440 (MAC_STATUS_SYNC_CHANGED
|
3441 MAC_STATUS_CFG_CHANGED
));
3444 NIC_SRAM_FIRMWARE_MBOX
,
3445 NIC_SRAM_FIRMWARE_MBOX_MAGIC2
);
3448 /* Prevent send BD corruption. */
3449 if (tg3_flag(tp
, CLKREQ_BUG
)) {
3450 u16 oldlnkctl
, newlnkctl
;
3452 pci_read_config_word(tp
->pdev
,
3453 tp
->pcie_cap
+ PCI_EXP_LNKCTL
,
3455 if (tp
->link_config
.active_speed
== SPEED_100
||
3456 tp
->link_config
.active_speed
== SPEED_10
)
3457 newlnkctl
= oldlnkctl
& ~PCI_EXP_LNKCTL_CLKREQ_EN
;
3459 newlnkctl
= oldlnkctl
| PCI_EXP_LNKCTL_CLKREQ_EN
;
3460 if (newlnkctl
!= oldlnkctl
)
3461 pci_write_config_word(tp
->pdev
,
3462 tp
->pcie_cap
+ PCI_EXP_LNKCTL
,
3466 if (current_link_up
!= netif_carrier_ok(tp
->dev
)) {
3467 if (current_link_up
)
3468 netif_carrier_on(tp
->dev
);
3470 netif_carrier_off(tp
->dev
);
3471 tg3_link_report(tp
);

struct tg3_fiber_aneginfo {
	int state;
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK	10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	unsigned long link_time, cur_time;

	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06

};
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

#define ANEG_STATE_SETTLE_TIME	10000
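
/* Readability note (added, not in the original source): the fiber
 * autonegotiation state machine below returns ANEG_DONE when negotiation
 * has finished, ANEG_TIMER_ENAB when it needs more time, and ANEG_FAILED
 * on error.  fiber_autoneg() simply keeps calling it until one of the
 * terminal states (ANEG_DONE or ANEG_FAILED) is reached.
 */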
3541 static int tg3_fiber_aneg_smachine(struct tg3
*tp
,
3542 struct tg3_fiber_aneginfo
*ap
)
3545 unsigned long delta
;
3549 if (ap
->state
== ANEG_STATE_UNKNOWN
) {
3553 ap
->ability_match_cfg
= 0;
3554 ap
->ability_match_count
= 0;
3555 ap
->ability_match
= 0;
3561 if (tr32(MAC_STATUS
) & MAC_STATUS_RCVD_CFG
) {
3562 rx_cfg_reg
= tr32(MAC_RX_AUTO_NEG
);
3564 if (rx_cfg_reg
!= ap
->ability_match_cfg
) {
3565 ap
->ability_match_cfg
= rx_cfg_reg
;
3566 ap
->ability_match
= 0;
3567 ap
->ability_match_count
= 0;
3569 if (++ap
->ability_match_count
> 1) {
3570 ap
->ability_match
= 1;
3571 ap
->ability_match_cfg
= rx_cfg_reg
;
3574 if (rx_cfg_reg
& ANEG_CFG_ACK
)
3582 ap
->ability_match_cfg
= 0;
3583 ap
->ability_match_count
= 0;
3584 ap
->ability_match
= 0;
3590 ap
->rxconfig
= rx_cfg_reg
;
3593 switch (ap
->state
) {
3594 case ANEG_STATE_UNKNOWN
:
3595 if (ap
->flags
& (MR_AN_ENABLE
| MR_RESTART_AN
))
3596 ap
->state
= ANEG_STATE_AN_ENABLE
;
3599 case ANEG_STATE_AN_ENABLE
:
3600 ap
->flags
&= ~(MR_AN_COMPLETE
| MR_PAGE_RX
);
3601 if (ap
->flags
& MR_AN_ENABLE
) {
3604 ap
->ability_match_cfg
= 0;
3605 ap
->ability_match_count
= 0;
3606 ap
->ability_match
= 0;
3610 ap
->state
= ANEG_STATE_RESTART_INIT
;
3612 ap
->state
= ANEG_STATE_DISABLE_LINK_OK
;
3616 case ANEG_STATE_RESTART_INIT
:
3617 ap
->link_time
= ap
->cur_time
;
3618 ap
->flags
&= ~(MR_NP_LOADED
);
3620 tw32(MAC_TX_AUTO_NEG
, 0);
3621 tp
->mac_mode
|= MAC_MODE_SEND_CONFIGS
;
3622 tw32_f(MAC_MODE
, tp
->mac_mode
);
3625 ret
= ANEG_TIMER_ENAB
;
3626 ap
->state
= ANEG_STATE_RESTART
;
3629 case ANEG_STATE_RESTART
:
3630 delta
= ap
->cur_time
- ap
->link_time
;
3631 if (delta
> ANEG_STATE_SETTLE_TIME
)
3632 ap
->state
= ANEG_STATE_ABILITY_DETECT_INIT
;
3634 ret
= ANEG_TIMER_ENAB
;
3637 case ANEG_STATE_DISABLE_LINK_OK
:
3641 case ANEG_STATE_ABILITY_DETECT_INIT
:
3642 ap
->flags
&= ~(MR_TOGGLE_TX
);
3643 ap
->txconfig
= ANEG_CFG_FD
;
3644 flowctrl
= tg3_advert_flowctrl_1000X(tp
->link_config
.flowctrl
);
3645 if (flowctrl
& ADVERTISE_1000XPAUSE
)
3646 ap
->txconfig
|= ANEG_CFG_PS1
;
3647 if (flowctrl
& ADVERTISE_1000XPSE_ASYM
)
3648 ap
->txconfig
|= ANEG_CFG_PS2
;
3649 tw32(MAC_TX_AUTO_NEG
, ap
->txconfig
);
3650 tp
->mac_mode
|= MAC_MODE_SEND_CONFIGS
;
3651 tw32_f(MAC_MODE
, tp
->mac_mode
);
3654 ap
->state
= ANEG_STATE_ABILITY_DETECT
;
3657 case ANEG_STATE_ABILITY_DETECT
:
3658 if (ap
->ability_match
!= 0 && ap
->rxconfig
!= 0)
3659 ap
->state
= ANEG_STATE_ACK_DETECT_INIT
;
3662 case ANEG_STATE_ACK_DETECT_INIT
:
3663 ap
->txconfig
|= ANEG_CFG_ACK
;
3664 tw32(MAC_TX_AUTO_NEG
, ap
->txconfig
);
3665 tp
->mac_mode
|= MAC_MODE_SEND_CONFIGS
;
3666 tw32_f(MAC_MODE
, tp
->mac_mode
);
3669 ap
->state
= ANEG_STATE_ACK_DETECT
;
3672 case ANEG_STATE_ACK_DETECT
:
3673 if (ap
->ack_match
!= 0) {
3674 if ((ap
->rxconfig
& ~ANEG_CFG_ACK
) ==
3675 (ap
->ability_match_cfg
& ~ANEG_CFG_ACK
)) {
3676 ap
->state
= ANEG_STATE_COMPLETE_ACK_INIT
;
3678 ap
->state
= ANEG_STATE_AN_ENABLE
;
3680 } else if (ap
->ability_match
!= 0 &&
3681 ap
->rxconfig
== 0) {
3682 ap
->state
= ANEG_STATE_AN_ENABLE
;
3686 case ANEG_STATE_COMPLETE_ACK_INIT
:
3687 if (ap
->rxconfig
& ANEG_CFG_INVAL
) {
3691 ap
->flags
&= ~(MR_LP_ADV_FULL_DUPLEX
|
3692 MR_LP_ADV_HALF_DUPLEX
|
3693 MR_LP_ADV_SYM_PAUSE
|
3694 MR_LP_ADV_ASYM_PAUSE
|
3695 MR_LP_ADV_REMOTE_FAULT1
|
3696 MR_LP_ADV_REMOTE_FAULT2
|
3697 MR_LP_ADV_NEXT_PAGE
|
3700 if (ap
->rxconfig
& ANEG_CFG_FD
)
3701 ap
->flags
|= MR_LP_ADV_FULL_DUPLEX
;
3702 if (ap
->rxconfig
& ANEG_CFG_HD
)
3703 ap
->flags
|= MR_LP_ADV_HALF_DUPLEX
;
3704 if (ap
->rxconfig
& ANEG_CFG_PS1
)
3705 ap
->flags
|= MR_LP_ADV_SYM_PAUSE
;
3706 if (ap
->rxconfig
& ANEG_CFG_PS2
)
3707 ap
->flags
|= MR_LP_ADV_ASYM_PAUSE
;
3708 if (ap
->rxconfig
& ANEG_CFG_RF1
)
3709 ap
->flags
|= MR_LP_ADV_REMOTE_FAULT1
;
3710 if (ap
->rxconfig
& ANEG_CFG_RF2
)
3711 ap
->flags
|= MR_LP_ADV_REMOTE_FAULT2
;
3712 if (ap
->rxconfig
& ANEG_CFG_NP
)
3713 ap
->flags
|= MR_LP_ADV_NEXT_PAGE
;
3715 ap
->link_time
= ap
->cur_time
;
3717 ap
->flags
^= (MR_TOGGLE_TX
);
3718 if (ap
->rxconfig
& 0x0008)
3719 ap
->flags
|= MR_TOGGLE_RX
;
3720 if (ap
->rxconfig
& ANEG_CFG_NP
)
3721 ap
->flags
|= MR_NP_RX
;
3722 ap
->flags
|= MR_PAGE_RX
;
3724 ap
->state
= ANEG_STATE_COMPLETE_ACK
;
3725 ret
= ANEG_TIMER_ENAB
;
3728 case ANEG_STATE_COMPLETE_ACK
:
3729 if (ap
->ability_match
!= 0 &&
3730 ap
->rxconfig
== 0) {
3731 ap
->state
= ANEG_STATE_AN_ENABLE
;
3734 delta
= ap
->cur_time
- ap
->link_time
;
3735 if (delta
> ANEG_STATE_SETTLE_TIME
) {
3736 if (!(ap
->flags
& (MR_LP_ADV_NEXT_PAGE
))) {
3737 ap
->state
= ANEG_STATE_IDLE_DETECT_INIT
;
3739 if ((ap
->txconfig
& ANEG_CFG_NP
) == 0 &&
3740 !(ap
->flags
& MR_NP_RX
)) {
3741 ap
->state
= ANEG_STATE_IDLE_DETECT_INIT
;
3749 case ANEG_STATE_IDLE_DETECT_INIT
:
3750 ap
->link_time
= ap
->cur_time
;
3751 tp
->mac_mode
&= ~MAC_MODE_SEND_CONFIGS
;
3752 tw32_f(MAC_MODE
, tp
->mac_mode
);
3755 ap
->state
= ANEG_STATE_IDLE_DETECT
;
3756 ret
= ANEG_TIMER_ENAB
;
3759 case ANEG_STATE_IDLE_DETECT
:
3760 if (ap
->ability_match
!= 0 &&
3761 ap
->rxconfig
== 0) {
3762 ap
->state
= ANEG_STATE_AN_ENABLE
;
3765 delta
= ap
->cur_time
- ap
->link_time
;
3766 if (delta
> ANEG_STATE_SETTLE_TIME
) {
3767 /* XXX another gem from the Broadcom driver :( */
3768 ap
->state
= ANEG_STATE_LINK_OK
;
3772 case ANEG_STATE_LINK_OK
:
3773 ap
->flags
|= (MR_AN_COMPLETE
| MR_LINK_OK
);
3777 case ANEG_STATE_NEXT_PAGE_WAIT_INIT
:
3778 /* ??? unimplemented */
3781 case ANEG_STATE_NEXT_PAGE_WAIT
:
3782 /* ??? unimplemented */

static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}

static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
3888 static int tg3_setup_fiber_hw_autoneg(struct tg3
*tp
, u32 mac_status
)
3891 u32 sg_dig_ctrl
, sg_dig_status
;
3892 u32 serdes_cfg
, expected_sg_dig_ctrl
;
3893 int workaround
, port_a
;
3894 int current_link_up
;
3897 expected_sg_dig_ctrl
= 0;
3900 current_link_up
= 0;
3902 if (tp
->pci_chip_rev_id
!= CHIPREV_ID_5704_A0
&&
3903 tp
->pci_chip_rev_id
!= CHIPREV_ID_5704_A1
) {
3905 if (tr32(TG3PCI_DUAL_MAC_CTRL
) & DUAL_MAC_CTRL_ID
)
3908 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3909 /* preserve bits 20-23 for voltage regulator */
3910 serdes_cfg
= tr32(MAC_SERDES_CFG
) & 0x00f06fff;
3913 sg_dig_ctrl
= tr32(SG_DIG_CTRL
);
3915 if (tp
->link_config
.autoneg
!= AUTONEG_ENABLE
) {
3916 if (sg_dig_ctrl
& SG_DIG_USING_HW_AUTONEG
) {
3918 u32 val
= serdes_cfg
;
3924 tw32_f(MAC_SERDES_CFG
, val
);
3927 tw32_f(SG_DIG_CTRL
, SG_DIG_COMMON_SETUP
);
3929 if (mac_status
& MAC_STATUS_PCS_SYNCED
) {
3930 tg3_setup_flow_control(tp
, 0, 0);
3931 current_link_up
= 1;
3936 /* Want auto-negotiation. */
3937 expected_sg_dig_ctrl
= SG_DIG_USING_HW_AUTONEG
| SG_DIG_COMMON_SETUP
;
3939 flowctrl
= tg3_advert_flowctrl_1000X(tp
->link_config
.flowctrl
);
3940 if (flowctrl
& ADVERTISE_1000XPAUSE
)
3941 expected_sg_dig_ctrl
|= SG_DIG_PAUSE_CAP
;
3942 if (flowctrl
& ADVERTISE_1000XPSE_ASYM
)
3943 expected_sg_dig_ctrl
|= SG_DIG_ASYM_PAUSE
;
3945 if (sg_dig_ctrl
!= expected_sg_dig_ctrl
) {
3946 if ((tp
->phy_flags
& TG3_PHYFLG_PARALLEL_DETECT
) &&
3947 tp
->serdes_counter
&&
3948 ((mac_status
& (MAC_STATUS_PCS_SYNCED
|
3949 MAC_STATUS_RCVD_CFG
)) ==
3950 MAC_STATUS_PCS_SYNCED
)) {
3951 tp
->serdes_counter
--;
3952 current_link_up
= 1;
3957 tw32_f(MAC_SERDES_CFG
, serdes_cfg
| 0xc011000);
3958 tw32_f(SG_DIG_CTRL
, expected_sg_dig_ctrl
| SG_DIG_SOFT_RESET
);
3960 tw32_f(SG_DIG_CTRL
, expected_sg_dig_ctrl
);
3962 tp
->serdes_counter
= SERDES_AN_TIMEOUT_5704S
;
3963 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
3964 } else if (mac_status
& (MAC_STATUS_PCS_SYNCED
|
3965 MAC_STATUS_SIGNAL_DET
)) {
3966 sg_dig_status
= tr32(SG_DIG_STATUS
);
3967 mac_status
= tr32(MAC_STATUS
);
3969 if ((sg_dig_status
& SG_DIG_AUTONEG_COMPLETE
) &&
3970 (mac_status
& MAC_STATUS_PCS_SYNCED
)) {
3971 u32 local_adv
= 0, remote_adv
= 0;
3973 if (sg_dig_ctrl
& SG_DIG_PAUSE_CAP
)
3974 local_adv
|= ADVERTISE_1000XPAUSE
;
3975 if (sg_dig_ctrl
& SG_DIG_ASYM_PAUSE
)
3976 local_adv
|= ADVERTISE_1000XPSE_ASYM
;
3978 if (sg_dig_status
& SG_DIG_PARTNER_PAUSE_CAPABLE
)
3979 remote_adv
|= LPA_1000XPAUSE
;
3980 if (sg_dig_status
& SG_DIG_PARTNER_ASYM_PAUSE
)
3981 remote_adv
|= LPA_1000XPAUSE_ASYM
;
3983 tg3_setup_flow_control(tp
, local_adv
, remote_adv
);
3984 current_link_up
= 1;
3985 tp
->serdes_counter
= 0;
3986 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
3987 } else if (!(sg_dig_status
& SG_DIG_AUTONEG_COMPLETE
)) {
3988 if (tp
->serdes_counter
)
3989 tp
->serdes_counter
--;
3992 u32 val
= serdes_cfg
;
3999 tw32_f(MAC_SERDES_CFG
, val
);
4002 tw32_f(SG_DIG_CTRL
, SG_DIG_COMMON_SETUP
);
4005 /* Link parallel detection - link is up */
4006 /* only if we have PCS_SYNC and not */
4007 /* receiving config code words */
4008 mac_status
= tr32(MAC_STATUS
);
4009 if ((mac_status
& MAC_STATUS_PCS_SYNCED
) &&
4010 !(mac_status
& MAC_STATUS_RCVD_CFG
)) {
4011 tg3_setup_flow_control(tp
, 0, 0);
4012 current_link_up
= 1;
4014 TG3_PHYFLG_PARALLEL_DETECT
;
4015 tp
->serdes_counter
=
4016 SERDES_PARALLEL_DET_TIMEOUT
;
4018 goto restart_autoneg
;
4022 tp
->serdes_counter
= SERDES_AN_TIMEOUT_5704S
;
4023 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
4027 return current_link_up
;
4030 static int tg3_setup_fiber_by_hand(struct tg3
*tp
, u32 mac_status
)
4032 int current_link_up
= 0;
4034 if (!(mac_status
& MAC_STATUS_PCS_SYNCED
))
4037 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
4038 u32 txflags
, rxflags
;
4041 if (fiber_autoneg(tp
, &txflags
, &rxflags
)) {
4042 u32 local_adv
= 0, remote_adv
= 0;
4044 if (txflags
& ANEG_CFG_PS1
)
4045 local_adv
|= ADVERTISE_1000XPAUSE
;
4046 if (txflags
& ANEG_CFG_PS2
)
4047 local_adv
|= ADVERTISE_1000XPSE_ASYM
;
4049 if (rxflags
& MR_LP_ADV_SYM_PAUSE
)
4050 remote_adv
|= LPA_1000XPAUSE
;
4051 if (rxflags
& MR_LP_ADV_ASYM_PAUSE
)
4052 remote_adv
|= LPA_1000XPAUSE_ASYM
;
4054 tg3_setup_flow_control(tp
, local_adv
, remote_adv
);
4056 current_link_up
= 1;
4058 for (i
= 0; i
< 30; i
++) {
4061 (MAC_STATUS_SYNC_CHANGED
|
4062 MAC_STATUS_CFG_CHANGED
));
4064 if ((tr32(MAC_STATUS
) &
4065 (MAC_STATUS_SYNC_CHANGED
|
4066 MAC_STATUS_CFG_CHANGED
)) == 0)
4070 mac_status
= tr32(MAC_STATUS
);
4071 if (current_link_up
== 0 &&
4072 (mac_status
& MAC_STATUS_PCS_SYNCED
) &&
4073 !(mac_status
& MAC_STATUS_RCVD_CFG
))
4074 current_link_up
= 1;
4076 tg3_setup_flow_control(tp
, 0, 0);
4078 /* Forcing 1000FD link up. */
4079 current_link_up
= 1;
4081 tw32_f(MAC_MODE
, (tp
->mac_mode
| MAC_MODE_SEND_CONFIGS
));
4084 tw32_f(MAC_MODE
, tp
->mac_mode
);
4089 return current_link_up
;
4092 static int tg3_setup_fiber_phy(struct tg3
*tp
, int force_reset
)
4095 u16 orig_active_speed
;
4096 u8 orig_active_duplex
;
4098 int current_link_up
;
4101 orig_pause_cfg
= tp
->link_config
.active_flowctrl
;
4102 orig_active_speed
= tp
->link_config
.active_speed
;
4103 orig_active_duplex
= tp
->link_config
.active_duplex
;
4105 if (!tg3_flag(tp
, HW_AUTONEG
) &&
4106 netif_carrier_ok(tp
->dev
) &&
4107 tg3_flag(tp
, INIT_COMPLETE
)) {
4108 mac_status
= tr32(MAC_STATUS
);
4109 mac_status
&= (MAC_STATUS_PCS_SYNCED
|
4110 MAC_STATUS_SIGNAL_DET
|
4111 MAC_STATUS_CFG_CHANGED
|
4112 MAC_STATUS_RCVD_CFG
);
4113 if (mac_status
== (MAC_STATUS_PCS_SYNCED
|
4114 MAC_STATUS_SIGNAL_DET
)) {
4115 tw32_f(MAC_STATUS
, (MAC_STATUS_SYNC_CHANGED
|
4116 MAC_STATUS_CFG_CHANGED
));
4121 tw32_f(MAC_TX_AUTO_NEG
, 0);
4123 tp
->mac_mode
&= ~(MAC_MODE_PORT_MODE_MASK
| MAC_MODE_HALF_DUPLEX
);
4124 tp
->mac_mode
|= MAC_MODE_PORT_MODE_TBI
;
4125 tw32_f(MAC_MODE
, tp
->mac_mode
);
4128 if (tp
->phy_id
== TG3_PHY_ID_BCM8002
)
4129 tg3_init_bcm8002(tp
);
4131 /* Enable link change event even when serdes polling. */
4132 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
4135 current_link_up
= 0;
4136 mac_status
= tr32(MAC_STATUS
);
4138 if (tg3_flag(tp
, HW_AUTONEG
))
4139 current_link_up
= tg3_setup_fiber_hw_autoneg(tp
, mac_status
);
4141 current_link_up
= tg3_setup_fiber_by_hand(tp
, mac_status
);
4143 tp
->napi
[0].hw_status
->status
=
4144 (SD_STATUS_UPDATED
|
4145 (tp
->napi
[0].hw_status
->status
& ~SD_STATUS_LINK_CHG
));
4147 for (i
= 0; i
< 100; i
++) {
4148 tw32_f(MAC_STATUS
, (MAC_STATUS_SYNC_CHANGED
|
4149 MAC_STATUS_CFG_CHANGED
));
4151 if ((tr32(MAC_STATUS
) & (MAC_STATUS_SYNC_CHANGED
|
4152 MAC_STATUS_CFG_CHANGED
|
4153 MAC_STATUS_LNKSTATE_CHANGED
)) == 0)
4157 mac_status
= tr32(MAC_STATUS
);
4158 if ((mac_status
& MAC_STATUS_PCS_SYNCED
) == 0) {
4159 current_link_up
= 0;
4160 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
&&
4161 tp
->serdes_counter
== 0) {
4162 tw32_f(MAC_MODE
, (tp
->mac_mode
|
4163 MAC_MODE_SEND_CONFIGS
));
4165 tw32_f(MAC_MODE
, tp
->mac_mode
);
4169 if (current_link_up
== 1) {
4170 tp
->link_config
.active_speed
= SPEED_1000
;
4171 tp
->link_config
.active_duplex
= DUPLEX_FULL
;
4172 tw32(MAC_LED_CTRL
, (tp
->led_ctrl
|
4173 LED_CTRL_LNKLED_OVERRIDE
|
4174 LED_CTRL_1000MBPS_ON
));
4176 tp
->link_config
.active_speed
= SPEED_INVALID
;
4177 tp
->link_config
.active_duplex
= DUPLEX_INVALID
;
4178 tw32(MAC_LED_CTRL
, (tp
->led_ctrl
|
4179 LED_CTRL_LNKLED_OVERRIDE
|
4180 LED_CTRL_TRAFFIC_OVERRIDE
));
4183 if (current_link_up
!= netif_carrier_ok(tp
->dev
)) {
4184 if (current_link_up
)
4185 netif_carrier_on(tp
->dev
);
4187 netif_carrier_off(tp
->dev
);
4188 tg3_link_report(tp
);
4190 u32 now_pause_cfg
= tp
->link_config
.active_flowctrl
;
4191 if (orig_pause_cfg
!= now_pause_cfg
||
4192 orig_active_speed
!= tp
->link_config
.active_speed
||
4193 orig_active_duplex
!= tp
->link_config
.active_duplex
)
4194 tg3_link_report(tp
);
4200 static int tg3_setup_fiber_mii_phy(struct tg3
*tp
, int force_reset
)
4202 int current_link_up
, err
= 0;
4206 u32 local_adv
, remote_adv
;
4208 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
4209 tw32_f(MAC_MODE
, tp
->mac_mode
);
4215 (MAC_STATUS_SYNC_CHANGED
|
4216 MAC_STATUS_CFG_CHANGED
|
4217 MAC_STATUS_MI_COMPLETION
|
4218 MAC_STATUS_LNKSTATE_CHANGED
));
4224 current_link_up
= 0;
4225 current_speed
= SPEED_INVALID
;
4226 current_duplex
= DUPLEX_INVALID
;
4228 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4229 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4230 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
) {
4231 if (tr32(MAC_TX_STATUS
) & TX_STATUS_LINK_UP
)
4232 bmsr
|= BMSR_LSTATUS
;
4234 bmsr
&= ~BMSR_LSTATUS
;
4237 err
|= tg3_readphy(tp
, MII_BMCR
, &bmcr
);
4239 if ((tp
->link_config
.autoneg
== AUTONEG_ENABLE
) && !force_reset
&&
4240 (tp
->phy_flags
& TG3_PHYFLG_PARALLEL_DETECT
)) {
4241 /* do nothing, just check for link up at the end */
4242 } else if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
4245 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &adv
);
4246 new_adv
= adv
& ~(ADVERTISE_1000XFULL
| ADVERTISE_1000XHALF
|
4247 ADVERTISE_1000XPAUSE
|
4248 ADVERTISE_1000XPSE_ASYM
|
4251 new_adv
|= tg3_advert_flowctrl_1000X(tp
->link_config
.flowctrl
);
4253 if (tp
->link_config
.advertising
& ADVERTISED_1000baseT_Half
)
4254 new_adv
|= ADVERTISE_1000XHALF
;
4255 if (tp
->link_config
.advertising
& ADVERTISED_1000baseT_Full
)
4256 new_adv
|= ADVERTISE_1000XFULL
;
4258 if ((new_adv
!= adv
) || !(bmcr
& BMCR_ANENABLE
)) {
4259 tg3_writephy(tp
, MII_ADVERTISE
, new_adv
);
4260 bmcr
|= BMCR_ANENABLE
| BMCR_ANRESTART
;
4261 tg3_writephy(tp
, MII_BMCR
, bmcr
);
4263 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
4264 tp
->serdes_counter
= SERDES_AN_TIMEOUT_5714S
;
4265 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
4272 bmcr
&= ~BMCR_SPEED1000
;
4273 new_bmcr
= bmcr
& ~(BMCR_ANENABLE
| BMCR_FULLDPLX
);
4275 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
4276 new_bmcr
|= BMCR_FULLDPLX
;
4278 if (new_bmcr
!= bmcr
) {
4279 /* BMCR_SPEED1000 is a reserved bit that needs
4280 * to be set on write.
4282 new_bmcr
|= BMCR_SPEED1000
;
4284 /* Force a linkdown */
4285 if (netif_carrier_ok(tp
->dev
)) {
4288 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &adv
);
4289 adv
&= ~(ADVERTISE_1000XFULL
|
4290 ADVERTISE_1000XHALF
|
4292 tg3_writephy(tp
, MII_ADVERTISE
, adv
);
4293 tg3_writephy(tp
, MII_BMCR
, bmcr
|
4297 netif_carrier_off(tp
->dev
);
4299 tg3_writephy(tp
, MII_BMCR
, new_bmcr
);
4301 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4302 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4303 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
4305 if (tr32(MAC_TX_STATUS
) & TX_STATUS_LINK_UP
)
4306 bmsr
|= BMSR_LSTATUS
;
4308 bmsr
&= ~BMSR_LSTATUS
;
4310 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
4314 if (bmsr
& BMSR_LSTATUS
) {
4315 current_speed
= SPEED_1000
;
4316 current_link_up
= 1;
4317 if (bmcr
& BMCR_FULLDPLX
)
4318 current_duplex
= DUPLEX_FULL
;
4320 current_duplex
= DUPLEX_HALF
;
4325 if (bmcr
& BMCR_ANENABLE
) {
4328 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &local_adv
);
4329 err
|= tg3_readphy(tp
, MII_LPA
, &remote_adv
);
4330 common
= local_adv
& remote_adv
;
4331 if (common
& (ADVERTISE_1000XHALF
|
4332 ADVERTISE_1000XFULL
)) {
4333 if (common
& ADVERTISE_1000XFULL
)
4334 current_duplex
= DUPLEX_FULL
;
4336 current_duplex
= DUPLEX_HALF
;
4337 } else if (!tg3_flag(tp
, 5780_CLASS
)) {
4338 /* Link is up via parallel detect */
4340 current_link_up
= 0;
4345 if (current_link_up
== 1 && current_duplex
== DUPLEX_FULL
)
4346 tg3_setup_flow_control(tp
, local_adv
, remote_adv
);
4348 tp
->mac_mode
&= ~MAC_MODE_HALF_DUPLEX
;
4349 if (tp
->link_config
.active_duplex
== DUPLEX_HALF
)
4350 tp
->mac_mode
|= MAC_MODE_HALF_DUPLEX
;
4352 tw32_f(MAC_MODE
, tp
->mac_mode
);
4355 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
4357 tp
->link_config
.active_speed
= current_speed
;
4358 tp
->link_config
.active_duplex
= current_duplex
;
4360 if (current_link_up
!= netif_carrier_ok(tp
->dev
)) {
4361 if (current_link_up
)
4362 netif_carrier_on(tp
->dev
);
4364 netif_carrier_off(tp
->dev
);
4365 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
4367 tg3_link_report(tp
);

static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				     MII_TG3_DSP_EXP1_INT_STAT);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (netif_carrier_ok(tp->dev) &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}
}

static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		u32 scale;

		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
4497 static inline int tg3_irq_sync(struct tg3 *tp)
4499 return tp->irq_sync;
4502 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4506 dst = (u32 *)((u8 *)dst + off);
4507 for (i = 0; i < len; i += sizeof(u32))
4508 *dst++ = tr32(off + i);
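/* Editor's note (not in the original source): tg3_rd32_loop() stores each
 * register at its own byte offset inside the dump buffer, e.g. a call such as
 * tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0) would fill regs[MAC_MODE / 4]
 * through regs[(MAC_MODE + 0x4f0) / 4 - 1], so the dump printed below lines
 * up with absolute register offsets.
 */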
4511 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
4513 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
4514 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
4515 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
4516 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
4517 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
4518 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
4519 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
4520 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
4521 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
4522 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
4523 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
4524 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
4525 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
4526 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4527 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4528 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4529 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4530 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4531 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
4533 if (tg3_flag(tp, SUPPORT_MSIX))
4534 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4536 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4537 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4538 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4539 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4540 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4541 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4542 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4543 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
4545 if (!tg3_flag(tp, 5705_PLUS)) {
4546 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4547 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4548 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4551 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4552 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4553 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4554 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4555 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4557 if (tg3_flag(tp, NVRAM))
4558 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
4561 static void tg3_dump_state(struct tg3 *tp)
4566 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4568 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4572 if (tg3_flag(tp, PCI_EXPRESS)) {
4573 /* Read up to but not including private PCI registers */
4574 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4575 regs[i / sizeof(u32)] = tr32(i);
4577 tg3_dump_legacy_regs(tp, regs);
4579 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4580 if (!regs[i + 0] && !regs[i + 1] &&
4581 !regs[i + 2] && !regs[i + 3])
4584 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4586 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4591 for (i = 0; i < tp->irq_cnt; i++) {
4592 struct tg3_napi *tnapi = &tp->napi[i];
4594 /* SW status block */
4596 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4598 tnapi->hw_status->status,
4599 tnapi->hw_status->status_tag,
4600 tnapi->hw_status->rx_jumbo_consumer,
4601 tnapi->hw_status->rx_consumer,
4602 tnapi->hw_status->rx_mini_consumer,
4603 tnapi->hw_status->idx[0].rx_producer,
4604 tnapi->hw_status->idx[0].tx_consumer);
4607 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4609 tnapi->last_tag, tnapi->last_irq_tag,
4610 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4612 tnapi->prodring.rx_std_prod_idx,
4613 tnapi->prodring.rx_std_cons_idx,
4614 tnapi->prodring.rx_jmb_prod_idx,
4615 tnapi->prodring.rx_jmb_cons_idx);
4619 /* This is called whenever we suspect that the system chipset is re-
4620 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4621 * is bogus tx completions. We try to recover by setting the
4622 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4625 static void tg3_tx_recover(struct tg3 *tp)
4627 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
4628 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4630 netdev_warn(tp->dev,
4631 "The system may be re-ordering memory-mapped I/O "
4632 "cycles to the network device, attempting to recover. "
4633 "Please report the problem to the driver maintainer "
4634 "and include system chipset information.\n");
4636 spin_lock(&tp->lock);
4637 tg3_flag_set(tp, TX_RECOVERY_PENDING);
4638 spin_unlock(&tp->lock);
4641 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4643 /* Tell compiler to fetch tx indices from memory. */
4645 return tnapi->tx_pending -
4646 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
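/* Editor's note (hypothetical numbers, not in the original source): assuming
 * TG3_TX_RING_SIZE == 512, with tx_pending = 511, tx_prod = 10 and
 * tx_cons = 500 the ring holds (10 - 500) & 511 = 22 in-flight descriptors,
 * so tg3_tx_avail() would return 511 - 22 = 489 free slots.
 */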
4649 /* Tigon3 never reports partial packet sends. So we do not
4650 * need special logic to handle SKBs that have not had all
4651 * of their frags sent yet, like SunGEM does.
4653 static void tg3_tx(struct tg3_napi *tnapi)
4655 struct tg3 *tp = tnapi->tp;
4656 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4657 u32 sw_idx = tnapi->tx_cons;
4658 struct netdev_queue *txq;
4659 int index = tnapi - tp->napi;
4661 if (tg3_flag(tp, ENABLE_TSS))
4664 txq = netdev_get_tx_queue(tp->dev, index);
4666 while (sw_idx != hw_idx) {
4667 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4668 struct sk_buff *skb = ri->skb;
4671 if (unlikely(skb == NULL)) {
4676 pci_unmap_single(tp->pdev,
4677 dma_unmap_addr(ri, mapping),
4683 sw_idx = NEXT_TX(sw_idx);
4685 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4686 ri = &tnapi->tx_buffers[sw_idx];
4687 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4690 pci_unmap_page(tp->pdev,
4691 dma_unmap_addr(ri, mapping),
4692 skb_shinfo(skb)->frags[i].size,
4694 sw_idx = NEXT_TX(sw_idx);
4699 if (unlikely(tx_bug)) {
4705 tnapi->tx_cons = sw_idx;
4707 /* Need to make the tx_cons update visible to tg3_start_xmit()
4708 * before checking for netif_queue_stopped(). Without the
4709 * memory barrier, there is a small possibility that tg3_start_xmit()
4710 * will miss it and cause the queue to be stopped forever.
4714 if (unlikely(netif_tx_queue_stopped(txq) &&
4715 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4716 __netif_tx_lock(txq, smp_processor_id());
4717 if (netif_tx_queue_stopped(txq) &&
4718 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4719 netif_tx_wake_queue(txq);
4720 __netif_tx_unlock(txq);
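/* Editor's note (not in the original source): the stopped-queue test above is
 * done twice on purpose - once locklessly and once again under the tx queue
 * lock - so that a queue stopped concurrently by tg3_start_xmit() is only
 * woken when more than TG3_TX_WAKEUP_THRESH descriptors have really been
 * reclaimed.
 */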
4724 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4729 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4730 map_sz, PCI_DMA_FROMDEVICE);
4731 dev_kfree_skb_any(ri->skb);
4735 /* Returns size of skb allocated or < 0 on error.
4737 * We only need to fill in the address because the other members
4738 * of the RX descriptor are invariant, see tg3_init_rings.
4740 * Note the purposeful asymmetry of cpu vs. chip accesses. For
4741 * posting buffers we only dirty the first cache line of the RX
4742 * descriptor (containing the address). Whereas for the RX status
4743 * buffers the cpu only reads the last cacheline of the RX descriptor
4744 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4746 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4747 u32 opaque_key, u32 dest_idx_unmasked)
4749 struct tg3_rx_buffer_desc *desc;
4750 struct ring_info *map;
4751 struct sk_buff *skb;
4753 int skb_size, dest_idx;
4755 switch (opaque_key) {
4756 case RXD_OPAQUE_RING_STD:
4757 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4758 desc = &tpr->rx_std[dest_idx];
4759 map = &tpr->rx_std_buffers[dest_idx];
4760 skb_size = tp->rx_pkt_map_sz;
4763 case RXD_OPAQUE_RING_JUMBO:
4764 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4765 desc = &tpr->rx_jmb[dest_idx].std;
4766 map = &tpr->rx_jmb_buffers[dest_idx];
4767 skb_size = TG3_RX_JMB_MAP_SZ;
4774 /* Do not overwrite any of the map or rp information
4775 * until we are sure we can commit to a new buffer.
4777 * Callers depend upon this behavior and assume that
4778 * we leave everything unchanged if we fail.
4780 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4784 skb_reserve(skb, tp->rx_offset);
4786 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4787 PCI_DMA_FROMDEVICE);
4788 if (pci_dma_mapping_error(tp->pdev, mapping)) {
4794 dma_unmap_addr_set(map, mapping, mapping);
4796 desc->addr_hi = ((u64)mapping >> 32);
4797 desc->addr_lo = ((u64)mapping & 0xffffffff);
4802 /* We only need to move over in the address because the other
4803 * members of the RX descriptor are invariant. See notes above
4804 * tg3_alloc_rx_skb for full details.
4806 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4807 struct tg3_rx_prodring_set *dpr,
4808 u32 opaque_key, int src_idx,
4809 u32 dest_idx_unmasked)
4811 struct tg3 *tp = tnapi->tp;
4812 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4813 struct ring_info *src_map, *dest_map;
4814 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4817 switch (opaque_key) {
4818 case RXD_OPAQUE_RING_STD:
4819 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4820 dest_desc = &dpr->rx_std[dest_idx];
4821 dest_map = &dpr->rx_std_buffers[dest_idx];
4822 src_desc = &spr->rx_std[src_idx];
4823 src_map = &spr->rx_std_buffers[src_idx];
4826 case RXD_OPAQUE_RING_JUMBO:
4827 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4828 dest_desc = &dpr->rx_jmb[dest_idx].std;
4829 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4830 src_desc = &spr->rx_jmb[src_idx].std;
4831 src_map = &spr->rx_jmb_buffers[src_idx];
4838 dest_map->skb = src_map->skb;
4839 dma_unmap_addr_set(dest_map, mapping,
4840 dma_unmap_addr(src_map, mapping));
4841 dest_desc->addr_hi = src_desc->addr_hi;
4842 dest_desc->addr_lo = src_desc->addr_lo;
4844 /* Ensure that the update to the skb happens after the physical
4845 * addresses have been transferred to the new BD location.
4849 src_map->skb = NULL;
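/* Editor's note (not in the original source): tg3_recycle_rx() hands an
 * already-mapped buffer back to a producer ring by copying the skb pointer,
 * the DMA unmap cookie and the descriptor address from the source slot; no
 * remapping or reallocation happens on this path.
 */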
4852 /* The RX ring scheme is composed of multiple rings which post fresh
4853 * buffers to the chip, and one special ring the chip uses to report
4854 * status back to the host.
4856 * The special ring reports the status of received packets to the
4857 * host. The chip does not write into the original descriptor the
4858 * RX buffer was obtained from. The chip simply takes the original
4859 * descriptor as provided by the host, updates the status and length
4860 * field, then writes this into the next status ring entry.
4862 * Each ring the host uses to post buffers to the chip is described
4863 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
4864 * it is first placed into the on-chip ram. When the packet's length
4865 * is known, it walks down the TG3_BDINFO entries to select the ring.
4866 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4867 * which is within the range of the new packet's length is chosen.
4869 * The "separate ring for rx status" scheme may sound queer, but it makes
4870 * sense from a cache coherency perspective. If only the host writes
4871 * to the buffer post rings, and only the chip writes to the rx status
4872 * rings, then cache lines never move beyond shared-modified state.
4873 * If both the host and chip were to write into the same ring, cache line
4874 * eviction could occur since both entities want it in an exclusive state.
4876 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4878 struct tg3 *tp = tnapi->tp;
4879 u32 work_mask, rx_std_posted = 0;
4880 u32 std_prod_idx, jmb_prod_idx;
4881 u32 sw_idx = tnapi->rx_rcb_ptr;
4884 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
4886 hw_idx = *(tnapi->rx_rcb_prod_idx);
4888 * We need to order the read of hw_idx and the read of
4889 * the opaque cookie.
4894 std_prod_idx = tpr->rx_std_prod_idx;
4895 jmb_prod_idx = tpr->rx_jmb_prod_idx;
4896 while (sw_idx != hw_idx && budget > 0) {
4897 struct ring_info *ri;
4898 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4900 struct sk_buff *skb;
4901 dma_addr_t dma_addr;
4902 u32 opaque_key, desc_idx, *post_ptr;
4904 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4905 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4906 if (opaque_key == RXD_OPAQUE_RING_STD) {
4907 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
4908 dma_addr = dma_unmap_addr(ri, mapping);
4910 post_ptr = &std_prod_idx;
4912 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4913 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
4914 dma_addr = dma_unmap_addr(ri, mapping);
4916 post_ptr = &jmb_prod_idx;
4918 goto next_pkt_nopost;
4920 work_mask |= opaque_key;
4922 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4923 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4925 tg3_recycle_rx(tnapi, tpr, opaque_key,
4926 desc_idx, *post_ptr);
4928 /* Other statistics kept track of by card. */
4933 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4936 if (len > TG3_RX_COPY_THRESH(tp)) {
4939 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4944 pci_unmap_single(tp->pdev, dma_addr, skb_size,
4945 PCI_DMA_FROMDEVICE);
4947 /* Ensure that the update to the skb happens
4948 * after the usage of the old DMA mapping.
4956 struct sk_buff *copy_skb;
4958 tg3_recycle_rx(tnapi, tpr, opaque_key,
4959 desc_idx, *post_ptr);
4961 copy_skb = netdev_alloc_skb(tp->dev, len +
4963 if (copy_skb == NULL)
4964 goto drop_it_no_recycle;
4966 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4967 skb_put(copy_skb, len);
4968 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4969 skb_copy_from_linear_data(skb, copy_skb->data, len);
4970 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4972 /* We'll reuse the original ring buffer. */
4976 if ((tp->dev->features & NETIF_F_RXCSUM) &&
4977 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4978 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4979 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4980 skb->ip_summed = CHECKSUM_UNNECESSARY;
4982 skb_checksum_none_assert(skb);
4984 skb->protocol = eth_type_trans(skb, tp->dev);
4986 if (len > (tp->dev->mtu + ETH_HLEN) &&
4987 skb->protocol != htons(ETH_P_8021Q)) {
4989 goto drop_it_no_recycle;
4992 if (desc->type_flags & RXD_FLAG_VLAN &&
4993 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
4994 __vlan_hwaccel_put_tag(skb,
4995 desc->err_vlan & RXD_VLAN_MASK);
4997 napi_gro_receive(&tnapi->napi, skb);
5005 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5006 tpr->rx_std_prod_idx = std_prod_idx &
5007 tp->rx_std_ring_mask;
5008 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5009 tpr->rx_std_prod_idx);
5010 work_mask &= ~RXD_OPAQUE_RING_STD;
5015 sw_idx &= tp->rx_ret_ring_mask;
5017 /* Refresh hw_idx to see if there is new work */
5018 if (sw_idx == hw_idx) {
5019 hw_idx = *(tnapi->rx_rcb_prod_idx);
5024 /* ACK the status ring. */
5025 tnapi->rx_rcb_ptr = sw_idx;
5026 tw32_rx_mbox(tnapi->consmbox, sw_idx);
5028 /* Refill RX ring(s). */
5029 if (!tg3_flag(tp, ENABLE_RSS)) {
5030 if (work_mask & RXD_OPAQUE_RING_STD) {
5031 tpr->rx_std_prod_idx = std_prod_idx &
5032 tp->rx_std_ring_mask;
5033 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5034 tpr->rx_std_prod_idx);
5036 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5037 tpr->rx_jmb_prod_idx = jmb_prod_idx &
5038 tp->rx_jmb_ring_mask;
5039 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5040 tpr->rx_jmb_prod_idx);
5043 } else if (work_mask) {
5044 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5045 * updated before the producer indices can be updated.
5049 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5050 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5052 if (tnapi != &tp->napi[1])
5053 napi_schedule(&tp->napi[1].napi);
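/* Editor's note (not in the original source): tg3_rx() uses two receive
 * strategies - frames larger than TG3_RX_COPY_THRESH(tp) keep the original
 * buffer and a replacement skb is allocated for the ring, while small frames
 * are copied into a fresh skb so that the original ring buffer can be
 * recycled in place.
 */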
5059 static void tg3_poll_link(struct tg3 *tp)
5061 /* handle link change and other phy events */
5062 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5063 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5065 if (sblk->status & SD_STATUS_LINK_CHG) {
5066 sblk->status = SD_STATUS_UPDATED |
5067 (sblk->status & ~SD_STATUS_LINK_CHG);
5068 spin_lock(&tp->lock);
5069 if (tg3_flag(tp, USE_PHYLIB)) {
5071 (MAC_STATUS_SYNC_CHANGED |
5072 MAC_STATUS_CFG_CHANGED |
5073 MAC_STATUS_MI_COMPLETION |
5074 MAC_STATUS_LNKSTATE_CHANGED));
5077 tg3_setup_phy(tp, 0);
5078 spin_unlock(&tp->lock);
5083 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5084 struct tg3_rx_prodring_set *dpr,
5085 struct tg3_rx_prodring_set *spr)
5087 u32 si, di, cpycnt, src_prod_idx;
5091 src_prod_idx = spr->rx_std_prod_idx;
5093 /* Make sure updates to the rx_std_buffers[] entries and the
5094 * standard producer index are seen in the correct order.
5098 if (spr->rx_std_cons_idx == src_prod_idx)
5101 if (spr->rx_std_cons_idx < src_prod_idx)
5102 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5104 cpycnt = tp->rx_std_ring_mask + 1 -
5105 spr->rx_std_cons_idx;
5107 cpycnt = min(cpycnt,
5108 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5110 si = spr->rx_std_cons_idx;
5111 di = dpr->rx_std_prod_idx;
5113 for (i = di; i < di + cpycnt; i++) {
5114 if (dpr->rx_std_buffers[i].skb) {
5124 /* Ensure that updates to the rx_std_buffers ring and the
5125 * shadowed hardware producer ring from tg3_recycle_skb() are
5126 * ordered correctly WRT the skb check above.
5130 memcpy(&dpr->rx_std_buffers[di],
5131 &spr->rx_std_buffers[si],
5132 cpycnt * sizeof(struct ring_info));
5134 for (i = 0; i < cpycnt; i++, di++, si++) {
5135 struct tg3_rx_buffer_desc *sbd, *dbd;
5136 sbd = &spr->rx_std[si];
5137 dbd = &dpr->rx_std[di];
5138 dbd->addr_hi = sbd->addr_hi;
5139 dbd->addr_lo = sbd->addr_lo;
5142 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5143 tp->rx_std_ring_mask;
5144 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5145 tp->rx_std_ring_mask;
5149 src_prod_idx = spr->rx_jmb_prod_idx;
5151 /* Make sure updates to the rx_jmb_buffers[] entries and
5152 * the jumbo producer index are seen in the correct order.
5156 if (spr->rx_jmb_cons_idx == src_prod_idx)
5159 if (spr->rx_jmb_cons_idx < src_prod_idx)
5160 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5162 cpycnt = tp->rx_jmb_ring_mask + 1 -
5163 spr->rx_jmb_cons_idx;
5165 cpycnt = min(cpycnt,
5166 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5168 si = spr->rx_jmb_cons_idx;
5169 di = dpr->rx_jmb_prod_idx;
5171 for (i = di; i < di + cpycnt; i++) {
5172 if (dpr->rx_jmb_buffers[i].skb) {
5182 /* Ensure that updates to the rx_jmb_buffers ring and the
5183 * shadowed hardware producer ring from tg3_recycle_skb() are
5184 * ordered correctly WRT the skb check above.
5188 memcpy(&dpr->rx_jmb_buffers[di],
5189 &spr->rx_jmb_buffers[si],
5190 cpycnt * sizeof(struct ring_info));
5192 for (i = 0; i < cpycnt; i++, di++, si++) {
5193 struct tg3_rx_buffer_desc *sbd, *dbd;
5194 sbd = &spr->rx_jmb[si].std;
5195 dbd = &dpr->rx_jmb[di].std;
5196 dbd->addr_hi = sbd->addr_hi;
5197 dbd->addr_lo = sbd->addr_lo;
5200 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5201 tp->rx_jmb_ring_mask;
5202 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5203 tp->rx_jmb_ring_mask;
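/* Editor's note (not in the original source): with RSS enabled the per-vector
 * NAPI contexts consume buffers, but only tp->napi[0] owns the hardware
 * producer rings. tg3_poll_work() therefore uses tg3_rx_prodring_xfer() to
 * migrate recycled std/jumbo buffers from each vector's shadow ring into the
 * ring that is actually posted to the chip.
 */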
5209 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5211 struct tg3 *tp = tnapi->tp;
5213 /* run TX completion thread */
5214 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5216 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5220 /* run RX thread, within the bounds set by NAPI.
5221 * All RX "locking" is done by ensuring outside
5222 * code synchronizes with tg3->napi.poll()
5224 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5225 work_done += tg3_rx(tnapi, budget - work_done);
5227 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5228 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5230 u32 std_prod_idx = dpr->rx_std_prod_idx;
5231 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5233 for (i = 1; i < tp->irq_cnt; i++)
5234 err |= tg3_rx_prodring_xfer(tp, dpr,
5235 &tp->napi[i].prodring);
5239 if (std_prod_idx != dpr->rx_std_prod_idx)
5240 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5241 dpr->rx_std_prod_idx);
5243 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5244 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5245 dpr->rx_jmb_prod_idx);
5250 tw32_f(HOSTCC_MODE, tp->coal_now);
5256 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5258 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5259 struct tg3 *tp = tnapi->tp;
5261 struct tg3_hw_status *sblk = tnapi->hw_status;
5264 work_done = tg3_poll_work(tnapi, work_done, budget);
5266 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5269 if (unlikely(work_done >= budget))
5272 /* tp->last_tag is used in tg3_int_reenable() below
5273 * to tell the hw how much work has been processed,
5274 * so we must read it before checking for more work.
5276 tnapi->last_tag = sblk->status_tag;
5277 tnapi->last_irq_tag = tnapi->last_tag;
5280 /* check for RX/TX work to do */
5281 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5282 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5283 napi_complete(napi);
5284 /* Reenable interrupts. */
5285 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5294 /* work_done is guaranteed to be less than budget. */
5295 napi_complete(napi);
5296 schedule_work(&tp->reset_task);
5300 static void tg3_process_error(struct tg3 *tp)
5303 bool real_error = false;
5305 if (tg3_flag(tp, ERROR_PROCESSED))
5308 /* Check Flow Attention register */
5309 val = tr32(HOSTCC_FLOW_ATTN);
5310 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5311 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
5315 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5316 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
5320 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5321 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
5330 tg3_flag_set(tp, ERROR_PROCESSED);
5331 schedule_work(&tp->reset_task);
5334 static int tg3_poll(struct napi_struct *napi, int budget)
5336 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5337 struct tg3 *tp = tnapi->tp;
5339 struct tg3_hw_status *sblk = tnapi->hw_status;
5342 if (sblk->status & SD_STATUS_ERROR)
5343 tg3_process_error(tp);
5347 work_done = tg3_poll_work(tnapi, work_done, budget);
5349 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5352 if (unlikely(work_done >= budget))
5355 if (tg3_flag(tp, TAGGED_STATUS)) {
5356 /* tp->last_tag is used in tg3_int_reenable() below
5357 * to tell the hw how much work has been processed,
5358 * so we must read it before checking for more work.
5360 tnapi->last_tag = sblk->status_tag;
5361 tnapi->last_irq_tag = tnapi->last_tag;
5364 sblk->status &= ~SD_STATUS_UPDATED;
5366 if (likely(!tg3_has_work(tnapi))) {
5367 napi_complete(napi);
5368 tg3_int_reenable(tnapi);
5376 /* work_done is guaranteed to be less than budget. */
5377 napi_complete(napi);
5378 schedule_work(&tp->reset_task);
5382 static void tg3_napi_disable(struct tg3 *tp)
5386 for (i = tp->irq_cnt - 1; i >= 0; i--)
5387 napi_disable(&tp->napi[i].napi);
5390 static void tg3_napi_enable(struct tg3 *tp)
5394 for (i = 0; i < tp->irq_cnt; i++)
5395 napi_enable(&tp->napi[i].napi);
5398 static void tg3_napi_init(struct tg3 *tp)
5402 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5403 for (i = 1; i < tp->irq_cnt; i++)
5404 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5407 static void tg3_napi_fini(struct tg3 *tp)
5411 for (i = 0; i < tp->irq_cnt; i++)
5412 netif_napi_del(&tp->napi[i].napi);
5415 static inline void tg3_netif_stop(struct tg3 *tp)
5417 tp->dev->trans_start = jiffies; /* prevent tx timeout */
5418 tg3_napi_disable(tp);
5419 netif_tx_disable(tp->dev);
5422 static inline void tg3_netif_start(struct tg3 *tp)
5424 /* NOTE: unconditional netif_tx_wake_all_queues is only
5425 * appropriate so long as all callers are assured to
5426 * have free tx slots (such as after tg3_init_hw)
5428 netif_tx_wake_all_queues(tp->dev);
5430 tg3_napi_enable(tp);
5431 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5432 tg3_enable_ints(tp);
5435 static void tg3_irq_quiesce(struct tg3 *tp)
5439 BUG_ON(tp->irq_sync);
5444 for (i = 0; i < tp->irq_cnt; i++)
5445 synchronize_irq(tp->napi[i].irq_vec);
5448 /* Fully shutdown all tg3 driver activity elsewhere in the system.
5449 * If irq_sync is non-zero, then the IRQ handler must be synchronized
5450 * with as well. Most of the time, this is not necessary except when
5451 * shutting down the device.
5453 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5455 spin_lock_bh(&tp->lock);
5457 tg3_irq_quiesce(tp);
5460 static inline void tg3_full_unlock(struct tg3 *tp)
5462 spin_unlock_bh(&tp->lock);
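/* Editor's note (not in the original source): a typical usage sketch -
 * tg3_full_lock(tp, 1) takes tp->lock with bottom halves disabled and also
 * quiesces the interrupt handlers, and every such critical section ends with
 * tg3_full_unlock(tp); callers that do not need the handlers synchronized
 * pass irq_sync == 0.
 */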
5465 /* One-shot MSI handler - Chip automatically disables interrupt
5466 * after sending MSI so driver doesn't have to do it.
5468 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5470 struct tg3_napi *tnapi = dev_id;
5471 struct tg3 *tp = tnapi->tp;
5473 prefetch(tnapi->hw_status);
5475 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5477 if (likely(!tg3_irq_sync(tp)))
5478 napi_schedule(&tnapi->napi);
5483 /* MSI ISR - No need to check for interrupt sharing and no need to
5484 * flush status block and interrupt mailbox. PCI ordering rules
5485 * guarantee that MSI will arrive after the status block.
5487 static irqreturn_t tg3_msi(int irq, void *dev_id)
5489 struct tg3_napi *tnapi = dev_id;
5490 struct tg3 *tp = tnapi->tp;
5492 prefetch(tnapi->hw_status);
5494 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5496 * Writing any value to intr-mbox-0 clears PCI INTA# and
5497 * chip-internal interrupt pending events.
5498 * Writing non-zero to intr-mbox-0 additionally tells the
5499 * NIC to stop sending us irqs, engaging "in-intr-handler"
5502 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5503 if (likely(!tg3_irq_sync(tp)))
5504 napi_schedule(&tnapi->napi);
5506 return IRQ_RETVAL(1);
5509 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5511 struct tg3_napi *tnapi = dev_id;
5512 struct tg3 *tp = tnapi->tp;
5513 struct tg3_hw_status *sblk = tnapi->hw_status;
5514 unsigned int handled = 1;
5516 /* In INTx mode, it is possible for the interrupt to arrive at
5517 * the CPU before the status block posted prior to the interrupt.
5518 * Reading the PCI State register will confirm whether the
5519 * interrupt is ours and will flush the status block.
5521 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5522 if (tg3_flag(tp, CHIP_RESETTING) ||
5523 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5530 * Writing any value to intr-mbox-0 clears PCI INTA# and
5531 * chip-internal interrupt pending events.
5532 * Writing non-zero to intr-mbox-0 additionally tells the
5533 * NIC to stop sending us irqs, engaging "in-intr-handler"
5536 * Flush the mailbox to de-assert the IRQ immediately to prevent
5537 * spurious interrupts. The flush impacts performance but
5538 * excessive spurious interrupts can be worse in some cases.
5540 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5541 if (tg3_irq_sync(tp))
5543 sblk->status &= ~SD_STATUS_UPDATED;
5544 if (likely(tg3_has_work(tnapi))) {
5545 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5546 napi_schedule(&tnapi->napi);
5548 /* No work, shared interrupt perhaps? re-enable
5549 * interrupts, and flush that PCI write
5551 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5555 return IRQ_RETVAL(handled);
5558 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5560 struct tg3_napi *tnapi = dev_id;
5561 struct tg3 *tp = tnapi->tp;
5562 struct tg3_hw_status *sblk = tnapi->hw_status;
5563 unsigned int handled = 1;
5565 /* In INTx mode, it is possible for the interrupt to arrive at
5566 * the CPU before the status block posted prior to the interrupt.
5567 * Reading the PCI State register will confirm whether the
5568 * interrupt is ours and will flush the status block.
5570 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5571 if (tg3_flag(tp, CHIP_RESETTING) ||
5572 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5579 * writing any value to intr-mbox-0 clears PCI INTA# and
5580 * chip-internal interrupt pending events.
5581 * writing non-zero to intr-mbox-0 additionally tells the
5582 * NIC to stop sending us irqs, engaging "in-intr-handler"
5585 * Flush the mailbox to de-assert the IRQ immediately to prevent
5586 * spurious interrupts. The flush impacts performance but
5587 * excessive spurious interrupts can be worse in some cases.
5589 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5592 * In a shared interrupt configuration, sometimes other devices'
5593 * interrupts will scream. We record the current status tag here
5594 * so that the above check can report that the screaming interrupts
5595 * are unhandled. Eventually they will be silenced.
5597 tnapi->last_irq_tag = sblk->status_tag;
5599 if (tg3_irq_sync(tp))
5602 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5604 napi_schedule(&tnapi->napi);
5607 return IRQ_RETVAL(handled);
5610 /* ISR for interrupt test */
5611 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5613 struct tg3_napi *tnapi = dev_id;
5614 struct tg3 *tp = tnapi->tp;
5615 struct tg3_hw_status *sblk = tnapi->hw_status;
5617 if ((sblk->status & SD_STATUS_UPDATED) ||
5618 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5619 tg3_disable_ints(tp);
5620 return IRQ_RETVAL(1);
5622 return IRQ_RETVAL(0);
5625 static int tg3_init_hw(struct tg3 *, int);
5626 static int tg3_halt(struct tg3 *, int, int);
5628 /* Restart hardware after configuration changes, self-test, etc.
5629 * Invoked with tp->lock held.
5631 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5632 __releases(tp->lock)
5633 __acquires(tp->lock)
5637 err = tg3_init_hw(tp, reset_phy);
5640 "Failed to re-initialize device, aborting\n");
5641 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5642 tg3_full_unlock(tp);
5643 del_timer_sync(&tp->timer);
5645 tg3_napi_enable(tp);
5647 tg3_full_lock(tp, 0);
5652 #ifdef CONFIG_NET_POLL_CONTROLLER
5653 static void tg3_poll_controller(struct net_device *dev)
5656 struct tg3 *tp = netdev_priv(dev);
5658 for (i = 0; i < tp->irq_cnt; i++)
5659 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5663 static void tg3_reset_task(struct work_struct *work)
5665 struct tg3 *tp = container_of(work, struct tg3, reset_task);
5667 unsigned int restart_timer;
5669 tg3_full_lock(tp, 0);
5671 if (!netif_running(tp->dev)) {
5672 tg3_full_unlock(tp);
5676 tg3_full_unlock(tp);
5682 tg3_full_lock(tp, 1);
5684 restart_timer = tg3_flag(tp, RESTART_TIMER);
5685 tg3_flag_clear(tp, RESTART_TIMER);
5687 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
5688 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5689 tp->write32_rx_mbox = tg3_write_flush_reg32;
5690 tg3_flag_set(tp, MBOX_WRITE_REORDER);
5691 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
5694 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5695 err = tg3_init_hw(tp, 1);
5699 tg3_netif_start(tp);
5702 mod_timer(&tp->timer, jiffies + 1);
5705 tg3_full_unlock(tp);
5711 static void tg3_tx_timeout(struct net_device *dev)
5713 struct tg3 *tp = netdev_priv(dev);
5715 if (netif_msg_tx_err(tp)) {
5716 netdev_err(dev, "transmit timed out, resetting\n");
5720 schedule_work(&tp->reset_task);
5723 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5724 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5726 u32 base = (u32) mapping & 0xffffffff;
5728 return (base > 0xffffdcc0) && (base + len + 8 < base);
5731 /* Test for DMA addresses > 40-bit */
5732 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5735 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5736 if (tg3_flag(tp, 40BIT_DMA_BUG))
5737 return ((u64) mapping + len) > DMA_BIT_MASK(40);
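/* Editor's note (hypothetical values, not in the original source): a mapping
 * at 0xfffff000 with len = 0x2000 gives base = 0xfffff000 > 0xffffdcc0 and
 * base + len + 8 wraps to 0x1008 < base, so tg3_4g_overflow_test() reports
 * that the buffer would cross the 4GB boundary.
 */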
5744 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5745 dma_addr_t mapping, int len, u32 flags,
5748 struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5749 int is_end = (mss_and_is_end & 0x1);
5750 u32 mss = (mss_and_is_end >> 1);
5754 flags |= TXD_FLAG_END;
5755 if (flags & TXD_FLAG_VLAN) {
5756 vlan_tag = flags >> 16;
5759 vlan_tag |= (mss << TXD_MSS_SHIFT);
5761 txd->addr_hi = ((u64) mapping >> 32);
5762 txd->addr_lo = ((u64) mapping & 0xffffffff);
5763 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5764 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
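/* Editor's note (not in the original source): tg3_set_txd() fills one send
 * BD - the 64-bit DMA address is split across addr_hi/addr_lo, len_flags
 * carries the length shifted by TXD_LEN_SHIFT together with the TXD_FLAG_*
 * bits, and the vlan_tag word holds the VLAN tag combined with the MSS
 * shifted by TXD_MSS_SHIFT for hardware TSO.
 */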
5767 static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
5768 struct sk_buff *skb, int last)
5771 u32 entry = tnapi->tx_prod;
5772 struct ring_info *txb = &tnapi->tx_buffers[entry];
5774 pci_unmap_single(tnapi->tp->pdev,
5775 dma_unmap_addr(txb, mapping),
5778 for (i = 0; i < last; i++) {
5779 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5781 entry = NEXT_TX(entry);
5782 txb = &tnapi->tx_buffers[entry];
5784 pci_unmap_page(tnapi->tp->pdev,
5785 dma_unmap_addr(txb, mapping),
5786 frag->size, PCI_DMA_TODEVICE);
5790 /* Workaround 4GB and 40-bit hardware DMA bugs. */
5791 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5792 struct sk_buff *skb,
5793 u32 base_flags, u32 mss)
5795 struct tg3 *tp = tnapi->tp;
5796 struct sk_buff *new_skb;
5797 dma_addr_t new_addr = 0;
5798 u32 entry = tnapi->tx_prod;
5801 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5802 new_skb = skb_copy(skb, GFP_ATOMIC);
5804 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5806 new_skb = skb_copy_expand(skb,
5807 skb_headroom(skb) + more_headroom,
5808 skb_tailroom(skb), GFP_ATOMIC);
5814 /* New SKB is guaranteed to be linear. */
5815 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5817 /* Make sure the mapping succeeded */
5818 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5820 dev_kfree_skb(new_skb);
5822 /* Make sure new skb does not cross any 4G boundaries.
5823 * Drop the packet if it does.
5825 } else if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
5826 pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5829 dev_kfree_skb(new_skb);
5831 tnapi->tx_buffers[entry].skb = new_skb;
5832 dma_unmap_addr_set(&tnapi->tx_buffers[entry],
5835 tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5836 base_flags, 1 | (mss << 1));
5845 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
5847 /* Use GSO to workaround a rare TSO bug that may be triggered when the
5848 * TSO header is greater than 80 bytes.
5850 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5852 struct sk_buff *segs, *nskb;
5853 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5855 /* Estimate the number of fragments in the worst case */
5856 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5857 netif_stop_queue(tp->dev);
5859 /* netif_tx_stop_queue() must be done before checking
5860 * tx index in tg3_tx_avail() below, because in
5861 * tg3_tx(), we update tx index before checking for
5862 * netif_tx_queue_stopped().
5865 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5866 return NETDEV_TX_BUSY;
5868 netif_wake_queue(tp->dev);
5871 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5873 goto tg3_tso_bug_end;
5879 tg3_start_xmit(nskb, tp->dev);
5885 return NETDEV_TX_OK;
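/* Editor's note (not in the original source): gso_segs * 3 is a coarse
 * worst-case guess - roughly one descriptor for the headers and two for
 * payload fragments per resulting segment - used only to decide whether to
 * stop the queue before skb_gso_segment() splits the oversized-header TSO
 * frame into individually transmitted packets.
 */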
5888 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5889 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
5891 static netdev_tx_t
tg3_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
5893 struct tg3
*tp
= netdev_priv(dev
);
5894 u32 len
, entry
, base_flags
, mss
;
5895 int i
= -1, would_hit_hwbug
;
5897 struct tg3_napi
*tnapi
;
5898 struct netdev_queue
*txq
;
5901 txq
= netdev_get_tx_queue(dev
, skb_get_queue_mapping(skb
));
5902 tnapi
= &tp
->napi
[skb_get_queue_mapping(skb
)];
5903 if (tg3_flag(tp
, ENABLE_TSS
))
5906 /* We are running in BH disabled context with netif_tx_lock
5907 * and TX reclaim runs via tp->napi.poll inside of a software
5908 * interrupt. Furthermore, IRQ processing runs lockless so we have
5909 * no IRQ context deadlocks to worry about either. Rejoice!
5911 if (unlikely(tg3_tx_avail(tnapi
) <= (skb_shinfo(skb
)->nr_frags
+ 1))) {
5912 if (!netif_tx_queue_stopped(txq
)) {
5913 netif_tx_stop_queue(txq
);
5915 /* This is a hard error, log it. */
5917 "BUG! Tx Ring full when queue awake!\n");
5919 return NETDEV_TX_BUSY
;
5922 entry
= tnapi
->tx_prod
;
5924 if (skb
->ip_summed
== CHECKSUM_PARTIAL
)
5925 base_flags
|= TXD_FLAG_TCPUDP_CSUM
;
5927 mss
= skb_shinfo(skb
)->gso_size
;
5930 u32 tcp_opt_len
, hdr_len
;
5932 if (skb_header_cloned(skb
) &&
5933 pskb_expand_head(skb
, 0, 0, GFP_ATOMIC
)) {
5939 tcp_opt_len
= tcp_optlen(skb
);
5941 if (skb_is_gso_v6(skb
)) {
5942 hdr_len
= skb_headlen(skb
) - ETH_HLEN
;
5946 ip_tcp_len
= ip_hdrlen(skb
) + sizeof(struct tcphdr
);
5947 hdr_len
= ip_tcp_len
+ tcp_opt_len
;
5950 iph
->tot_len
= htons(mss
+ hdr_len
);
5953 if (unlikely((ETH_HLEN
+ hdr_len
) > 80) &&
5954 tg3_flag(tp
, TSO_BUG
))
5955 return tg3_tso_bug(tp
, skb
);
5957 base_flags
|= (TXD_FLAG_CPU_PRE_DMA
|
5958 TXD_FLAG_CPU_POST_DMA
);
5960 if (tg3_flag(tp
, HW_TSO_1
) ||
5961 tg3_flag(tp
, HW_TSO_2
) ||
5962 tg3_flag(tp
, HW_TSO_3
)) {
5963 tcp_hdr(skb
)->check
= 0;
5964 base_flags
&= ~TXD_FLAG_TCPUDP_CSUM
;
5966 tcp_hdr(skb
)->check
= ~csum_tcpudp_magic(iph
->saddr
,
5971 if (tg3_flag(tp
, HW_TSO_3
)) {
5972 mss
|= (hdr_len
& 0xc) << 12;
5974 base_flags
|= 0x00000010;
5975 base_flags
|= (hdr_len
& 0x3e0) << 5;
5976 } else if (tg3_flag(tp
, HW_TSO_2
))
5977 mss
|= hdr_len
<< 9;
5978 else if (tg3_flag(tp
, HW_TSO_1
) ||
5979 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) {
5980 if (tcp_opt_len
|| iph
->ihl
> 5) {
5983 tsflags
= (iph
->ihl
- 5) + (tcp_opt_len
>> 2);
5984 mss
|= (tsflags
<< 11);
5987 if (tcp_opt_len
|| iph
->ihl
> 5) {
5990 tsflags
= (iph
->ihl
- 5) + (tcp_opt_len
>> 2);
5991 base_flags
|= tsflags
<< 12;
5996 if (vlan_tx_tag_present(skb
))
5997 base_flags
|= (TXD_FLAG_VLAN
|
5998 (vlan_tx_tag_get(skb
) << 16));
6000 if (tg3_flag(tp
, USE_JUMBO_BDFLAG
) &&
6001 !mss
&& skb
->len
> VLAN_ETH_FRAME_LEN
)
6002 base_flags
|= TXD_FLAG_JMB_PKT
;
6004 len
= skb_headlen(skb
);
6006 mapping
= pci_map_single(tp
->pdev
, skb
->data
, len
, PCI_DMA_TODEVICE
);
6007 if (pci_dma_mapping_error(tp
->pdev
, mapping
)) {
6012 tnapi
->tx_buffers
[entry
].skb
= skb
;
6013 dma_unmap_addr_set(&tnapi
->tx_buffers
[entry
], mapping
, mapping
);
6015 would_hit_hwbug
= 0;
6017 if (tg3_flag(tp
, SHORT_DMA_BUG
) && len
<= 8)
6018 would_hit_hwbug
= 1;
6020 if (tg3_4g_overflow_test(mapping
, len
))
6021 would_hit_hwbug
= 1;
6023 if (tg3_40bit_overflow_test(tp
, mapping
, len
))
6024 would_hit_hwbug
= 1;
6026 if (tg3_flag(tp
, 5701_DMA_BUG
))
6027 would_hit_hwbug
= 1;
6029 tg3_set_txd(tnapi
, entry
, mapping
, len
, base_flags
,
6030 (skb_shinfo(skb
)->nr_frags
== 0) | (mss
<< 1));
6032 entry
= NEXT_TX(entry
);
6034 /* Now loop through additional data fragments, and queue them. */
6035 if (skb_shinfo(skb
)->nr_frags
> 0) {
6036 last
= skb_shinfo(skb
)->nr_frags
- 1;
6037 for (i
= 0; i
<= last
; i
++) {
6038 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
6041 mapping
= pci_map_page(tp
->pdev
,
6044 len
, PCI_DMA_TODEVICE
);
6046 tnapi
->tx_buffers
[entry
].skb
= NULL
;
6047 dma_unmap_addr_set(&tnapi
->tx_buffers
[entry
], mapping
,
6049 if (pci_dma_mapping_error(tp
->pdev
, mapping
))
6052 if (tg3_flag(tp
, SHORT_DMA_BUG
) &&
6054 would_hit_hwbug
= 1;
6056 if (tg3_4g_overflow_test(mapping
, len
))
6057 would_hit_hwbug
= 1;
6059 if (tg3_40bit_overflow_test(tp
, mapping
, len
))
6060 would_hit_hwbug
= 1;
6062 if (tg3_flag(tp
, HW_TSO_1
) ||
6063 tg3_flag(tp
, HW_TSO_2
) ||
6064 tg3_flag(tp
, HW_TSO_3
))
6065 tg3_set_txd(tnapi
, entry
, mapping
, len
,
6066 base_flags
, (i
== last
)|(mss
<< 1));
6068 tg3_set_txd(tnapi
, entry
, mapping
, len
,
6069 base_flags
, (i
== last
));
6071 entry
= NEXT_TX(entry
);
6075 if (would_hit_hwbug
) {
6076 tg3_skb_error_unmap(tnapi
, skb
, i
);
6078 /* If the workaround fails due to memory/mapping
6079 * failure, silently drop this packet.
6081 if (tigon3_dma_hwbug_workaround(tnapi
, skb
, base_flags
, mss
))
6084 entry
= NEXT_TX(tnapi
->tx_prod
);
6087 /* Packets are ready, update Tx producer idx local and on card. */
6088 tw32_tx_mbox(tnapi
->prodmbox
, entry
);
6090 skb_tx_timestamp(skb
);
6092 tnapi
->tx_prod
= entry
;
6093 if (unlikely(tg3_tx_avail(tnapi
) <= (MAX_SKB_FRAGS
+ 1))) {
6094 netif_tx_stop_queue(txq
);
6096 /* netif_tx_stop_queue() must be done before checking
6097 * checking tx index in tg3_tx_avail() below, because in
6098 * tg3_tx(), we update tx index before checking for
6099 * netif_tx_queue_stopped().
6102 if (tg3_tx_avail(tnapi
) > TG3_TX_WAKEUP_THRESH(tnapi
))
6103 netif_tx_wake_queue(txq
);
6109 return NETDEV_TX_OK
;
6112 tg3_skb_error_unmap(tnapi
, skb
, i
);
6114 tnapi
->tx_buffers
[tnapi
->tx_prod
].skb
= NULL
;
6115 return NETDEV_TX_OK
;
6118 static void tg3_set_loopback(struct net_device
*dev
, u32 features
)
6120 struct tg3
*tp
= netdev_priv(dev
);
6122 if (features
& NETIF_F_LOOPBACK
) {
6123 if (tp
->mac_mode
& MAC_MODE_PORT_INT_LPBACK
)
6127 * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
6128 * loopback mode if Half-Duplex mode was negotiated earlier.
6130 tp
->mac_mode
&= ~MAC_MODE_HALF_DUPLEX
;
6132 /* Enable internal MAC loopback mode */
6133 tp
->mac_mode
|= MAC_MODE_PORT_INT_LPBACK
;
6134 spin_lock_bh(&tp
->lock
);
6135 tw32(MAC_MODE
, tp
->mac_mode
);
6136 netif_carrier_on(tp
->dev
);
6137 spin_unlock_bh(&tp
->lock
);
6138 netdev_info(dev
, "Internal MAC loopback mode enabled.\n");
6140 if (!(tp
->mac_mode
& MAC_MODE_PORT_INT_LPBACK
))
6143 /* Disable internal MAC loopback mode */
6144 tp
->mac_mode
&= ~MAC_MODE_PORT_INT_LPBACK
;
6145 spin_lock_bh(&tp
->lock
);
6146 tw32(MAC_MODE
, tp
->mac_mode
);
6147 /* Force link status check */
6148 tg3_setup_phy(tp
, 1);
6149 spin_unlock_bh(&tp
->lock
);
6150 netdev_info(dev
, "Internal MAC loopback mode disabled.\n");
6154 static u32
tg3_fix_features(struct net_device
*dev
, u32 features
)
6156 struct tg3
*tp
= netdev_priv(dev
);
6158 if (dev
->mtu
> ETH_DATA_LEN
&& tg3_flag(tp
, 5780_CLASS
))
6159 features
&= ~NETIF_F_ALL_TSO
;
6164 static int tg3_set_features(struct net_device
*dev
, u32 features
)
6166 u32 changed
= dev
->features
^ features
;
6168 if ((changed
& NETIF_F_LOOPBACK
) && netif_running(dev
))
6169 tg3_set_loopback(dev
, features
);
6174 static inline void tg3_set_mtu(struct net_device
*dev
, struct tg3
*tp
,
6179 if (new_mtu
> ETH_DATA_LEN
) {
6180 if (tg3_flag(tp
, 5780_CLASS
)) {
6181 netdev_update_features(dev
);
6182 tg3_flag_clear(tp
, TSO_CAPABLE
);
6184 tg3_flag_set(tp
, JUMBO_RING_ENABLE
);
6187 if (tg3_flag(tp
, 5780_CLASS
)) {
6188 tg3_flag_set(tp
, TSO_CAPABLE
);
6189 netdev_update_features(dev
);
6191 tg3_flag_clear(tp
, JUMBO_RING_ENABLE
);
6195 static int tg3_change_mtu(struct net_device
*dev
, int new_mtu
)
6197 struct tg3
*tp
= netdev_priv(dev
);
6200 if (new_mtu
< TG3_MIN_MTU
|| new_mtu
> TG3_MAX_MTU(tp
))
6203 if (!netif_running(dev
)) {
6204 /* We'll just catch it later when the
6207 tg3_set_mtu(dev
, tp
, new_mtu
);
6215 tg3_full_lock(tp
, 1);
6217 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
6219 tg3_set_mtu(dev
, tp
, new_mtu
);
6221 err
= tg3_restart_hw(tp
, 0);
6224 tg3_netif_start(tp
);
6226 tg3_full_unlock(tp
);
6234 static void tg3_rx_prodring_free(struct tg3
*tp
,
6235 struct tg3_rx_prodring_set
*tpr
)
6239 if (tpr
!= &tp
->napi
[0].prodring
) {
6240 for (i
= tpr
->rx_std_cons_idx
; i
!= tpr
->rx_std_prod_idx
;
6241 i
= (i
+ 1) & tp
->rx_std_ring_mask
)
6242 tg3_rx_skb_free(tp
, &tpr
->rx_std_buffers
[i
],
6245 if (tg3_flag(tp
, JUMBO_CAPABLE
)) {
6246 for (i
= tpr
->rx_jmb_cons_idx
;
6247 i
!= tpr
->rx_jmb_prod_idx
;
6248 i
= (i
+ 1) & tp
->rx_jmb_ring_mask
) {
6249 tg3_rx_skb_free(tp
, &tpr
->rx_jmb_buffers
[i
],
6257 for (i
= 0; i
<= tp
->rx_std_ring_mask
; i
++)
6258 tg3_rx_skb_free(tp
, &tpr
->rx_std_buffers
[i
],
6261 if (tg3_flag(tp
, JUMBO_CAPABLE
) && !tg3_flag(tp
, 5780_CLASS
)) {
6262 for (i
= 0; i
<= tp
->rx_jmb_ring_mask
; i
++)
6263 tg3_rx_skb_free(tp
, &tpr
->rx_jmb_buffers
[i
],
6268 /* Initialize rx rings for packet processing.
6270 * The chip has been shut down and the driver detached from
6271 * the networking, so no interrupts or new tx packets will
6272 * end up in the driver. tp->{tx,}lock are held and thus
6275 static int tg3_rx_prodring_alloc(struct tg3
*tp
,
6276 struct tg3_rx_prodring_set
*tpr
)
6278 u32 i
, rx_pkt_dma_sz
;
6280 tpr
->rx_std_cons_idx
= 0;
6281 tpr
->rx_std_prod_idx
= 0;
6282 tpr
->rx_jmb_cons_idx
= 0;
6283 tpr
->rx_jmb_prod_idx
= 0;
6285 if (tpr
!= &tp
->napi
[0].prodring
) {
6286 memset(&tpr
->rx_std_buffers
[0], 0,
6287 TG3_RX_STD_BUFF_RING_SIZE(tp
));
6288 if (tpr
->rx_jmb_buffers
)
6289 memset(&tpr
->rx_jmb_buffers
[0], 0,
6290 TG3_RX_JMB_BUFF_RING_SIZE(tp
));
6294 /* Zero out all descriptors. */
6295 memset(tpr
->rx_std
, 0, TG3_RX_STD_RING_BYTES(tp
));
6297 rx_pkt_dma_sz
= TG3_RX_STD_DMA_SZ
;
6298 if (tg3_flag(tp
, 5780_CLASS
) &&
6299 tp
->dev
->mtu
> ETH_DATA_LEN
)
6300 rx_pkt_dma_sz
= TG3_RX_JMB_DMA_SZ
;
6301 tp
->rx_pkt_map_sz
= TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz
);
6303 /* Initialize invariants of the rings, we only set this
6304 * stuff once. This works because the card does not
6305 * write into the rx buffer posting rings.
6307 for (i
= 0; i
<= tp
->rx_std_ring_mask
; i
++) {
6308 struct tg3_rx_buffer_desc
*rxd
;
6310 rxd
= &tpr
->rx_std
[i
];
6311 rxd
->idx_len
= rx_pkt_dma_sz
<< RXD_LEN_SHIFT
;
6312 rxd
->type_flags
= (RXD_FLAG_END
<< RXD_FLAGS_SHIFT
);
6313 rxd
->opaque
= (RXD_OPAQUE_RING_STD
|
6314 (i
<< RXD_OPAQUE_INDEX_SHIFT
));
6317 /* Now allocate fresh SKBs for each rx ring. */
6318 for (i
= 0; i
< tp
->rx_pending
; i
++) {
6319 if (tg3_alloc_rx_skb(tp
, tpr
, RXD_OPAQUE_RING_STD
, i
) < 0) {
6320 netdev_warn(tp
->dev
,
6321 "Using a smaller RX standard ring. Only "
6322 "%d out of %d buffers were allocated "
6323 "successfully\n", i
, tp
->rx_pending
);
6331 if (!tg3_flag(tp
, JUMBO_CAPABLE
) || tg3_flag(tp
, 5780_CLASS
))
6334 memset(tpr
->rx_jmb
, 0, TG3_RX_JMB_RING_BYTES(tp
));
6336 if (!tg3_flag(tp
, JUMBO_RING_ENABLE
))
6339 for (i
= 0; i
<= tp
->rx_jmb_ring_mask
; i
++) {
6340 struct tg3_rx_buffer_desc
*rxd
;
6342 rxd
= &tpr
->rx_jmb
[i
].std
;
6343 rxd
->idx_len
= TG3_RX_JMB_DMA_SZ
<< RXD_LEN_SHIFT
;
6344 rxd
->type_flags
= (RXD_FLAG_END
<< RXD_FLAGS_SHIFT
) |
6346 rxd
->opaque
= (RXD_OPAQUE_RING_JUMBO
|
6347 (i
<< RXD_OPAQUE_INDEX_SHIFT
));
6350 for (i
= 0; i
< tp
->rx_jumbo_pending
; i
++) {
6351 if (tg3_alloc_rx_skb(tp
, tpr
, RXD_OPAQUE_RING_JUMBO
, i
) < 0) {
6352 netdev_warn(tp
->dev
,
6353 "Using a smaller RX jumbo ring. Only %d "
6354 "out of %d buffers were allocated "
6355 "successfully\n", i
, tp
->rx_jumbo_pending
);
6358 tp
->rx_jumbo_pending
= i
;
6367 tg3_rx_prodring_free(tp
, tpr
);
6371 static void tg3_rx_prodring_fini(struct tg3
*tp
,
6372 struct tg3_rx_prodring_set
*tpr
)
6374 kfree(tpr
->rx_std_buffers
);
6375 tpr
->rx_std_buffers
= NULL
;
6376 kfree(tpr
->rx_jmb_buffers
);
6377 tpr
->rx_jmb_buffers
= NULL
;
6379 dma_free_coherent(&tp
->pdev
->dev
, TG3_RX_STD_RING_BYTES(tp
),
6380 tpr
->rx_std
, tpr
->rx_std_mapping
);
6384 dma_free_coherent(&tp
->pdev
->dev
, TG3_RX_JMB_RING_BYTES(tp
),
6385 tpr
->rx_jmb
, tpr
->rx_jmb_mapping
);
6390 static int tg3_rx_prodring_init(struct tg3
*tp
,
6391 struct tg3_rx_prodring_set
*tpr
)
6393 tpr
->rx_std_buffers
= kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp
),
6395 if (!tpr
->rx_std_buffers
)
6398 tpr
->rx_std
= dma_alloc_coherent(&tp
->pdev
->dev
,
6399 TG3_RX_STD_RING_BYTES(tp
),
6400 &tpr
->rx_std_mapping
,
6405 if (tg3_flag(tp
, JUMBO_CAPABLE
) && !tg3_flag(tp
, 5780_CLASS
)) {
6406 tpr
->rx_jmb_buffers
= kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp
),
6408 if (!tpr
->rx_jmb_buffers
)
6411 tpr
->rx_jmb
= dma_alloc_coherent(&tp
->pdev
->dev
,
6412 TG3_RX_JMB_RING_BYTES(tp
),
6413 &tpr
->rx_jmb_mapping
,
6422 tg3_rx_prodring_fini(tp
, tpr
);
6426 /* Free up pending packets in all rx/tx rings.
6428 * The chip has been shut down and the driver detached from
6429 * the networking, so no interrupts or new tx packets will
6430 * end up in the driver. tp->{tx,}lock is not held and we are not
6431 * in an interrupt context and thus may sleep.
6433 static void tg3_free_rings(struct tg3
*tp
)
6437 for (j
= 0; j
< tp
->irq_cnt
; j
++) {
6438 struct tg3_napi
*tnapi
= &tp
->napi
[j
];
6440 tg3_rx_prodring_free(tp
, &tnapi
->prodring
);
6442 if (!tnapi
->tx_buffers
)
6445 for (i
= 0; i
< TG3_TX_RING_SIZE
; ) {
6446 struct ring_info
*txp
;
6447 struct sk_buff
*skb
;
6450 txp
= &tnapi
->tx_buffers
[i
];
6458 pci_unmap_single(tp
->pdev
,
6459 dma_unmap_addr(txp
, mapping
),
6466 for (k
= 0; k
< skb_shinfo(skb
)->nr_frags
; k
++) {
6467 txp
= &tnapi
->tx_buffers
[i
& (TG3_TX_RING_SIZE
- 1)];
6468 pci_unmap_page(tp
->pdev
,
6469 dma_unmap_addr(txp
, mapping
),
6470 skb_shinfo(skb
)->frags
[k
].size
,
6475 dev_kfree_skb_any(skb
);
6480 /* Initialize tx/rx rings for packet processing.
6482 * The chip has been shut down and the driver detached from
6483 * the networking, so no interrupts or new tx packets will
6484 * end up in the driver. tp->{tx,}lock are held and thus
6487 static int tg3_init_rings(struct tg3
*tp
)
6491 /* Free up all the SKBs. */
6494 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
6495 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
6497 tnapi
->last_tag
= 0;
6498 tnapi
->last_irq_tag
= 0;
6499 tnapi
->hw_status
->status
= 0;
6500 tnapi
->hw_status
->status_tag
= 0;
6501 memset(tnapi
->hw_status
, 0, TG3_HW_STATUS_SIZE
);
6506 memset(tnapi
->tx_ring
, 0, TG3_TX_RING_BYTES
);
6508 tnapi
->rx_rcb_ptr
= 0;
6510 memset(tnapi
->rx_rcb
, 0, TG3_RX_RCB_RING_BYTES(tp
));
6512 if (tg3_rx_prodring_alloc(tp
, &tnapi
->prodring
)) {
6522 * Must not be invoked with interrupt sources disabled and
6523 * the hardware shutdown down.
6525 static void tg3_free_consistent(struct tg3
*tp
)
6529 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
6530 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
6532 if (tnapi
->tx_ring
) {
6533 dma_free_coherent(&tp
->pdev
->dev
, TG3_TX_RING_BYTES
,
6534 tnapi
->tx_ring
, tnapi
->tx_desc_mapping
);
6535 tnapi
->tx_ring
= NULL
;
6538 kfree(tnapi
->tx_buffers
);
6539 tnapi
->tx_buffers
= NULL
;
6541 if (tnapi
->rx_rcb
) {
6542 dma_free_coherent(&tp
->pdev
->dev
,
6543 TG3_RX_RCB_RING_BYTES(tp
),
6545 tnapi
->rx_rcb_mapping
);
6546 tnapi
->rx_rcb
= NULL
;
6549 tg3_rx_prodring_fini(tp
, &tnapi
->prodring
);
6551 if (tnapi
->hw_status
) {
6552 dma_free_coherent(&tp
->pdev
->dev
, TG3_HW_STATUS_SIZE
,
6554 tnapi
->status_mapping
);
6555 tnapi
->hw_status
= NULL
;
6560 dma_free_coherent(&tp
->pdev
->dev
, sizeof(struct tg3_hw_stats
),
6561 tp
->hw_stats
, tp
->stats_mapping
);
6562 tp
->hw_stats
= NULL
;
6567 * Must not be invoked with interrupt sources disabled and
6568 * the hardware shutdown down. Can sleep.
6570 static int tg3_alloc_consistent(struct tg3
*tp
)
6574 tp
->hw_stats
= dma_alloc_coherent(&tp
->pdev
->dev
,
6575 sizeof(struct tg3_hw_stats
),
6581 memset(tp
->hw_stats
, 0, sizeof(struct tg3_hw_stats
));
6583 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
6584 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
6585 struct tg3_hw_status
*sblk
;
6587 tnapi
->hw_status
= dma_alloc_coherent(&tp
->pdev
->dev
,
6589 &tnapi
->status_mapping
,
6591 if (!tnapi
->hw_status
)
6594 memset(tnapi
->hw_status
, 0, TG3_HW_STATUS_SIZE
);
6595 sblk
= tnapi
->hw_status
;
6597 if (tg3_rx_prodring_init(tp
, &tnapi
->prodring
))
6600 /* If multivector TSS is enabled, vector 0 does not handle
6601 * tx interrupts. Don't allocate any resources for it.
6603 if ((!i
&& !tg3_flag(tp
, ENABLE_TSS
)) ||
6604 (i
&& tg3_flag(tp
, ENABLE_TSS
))) {
6605 tnapi
->tx_buffers
= kzalloc(sizeof(struct ring_info
) *
6608 if (!tnapi
->tx_buffers
)
6611 tnapi
->tx_ring
= dma_alloc_coherent(&tp
->pdev
->dev
,
6613 &tnapi
->tx_desc_mapping
,
6615 if (!tnapi
->tx_ring
)
6620 * When RSS is enabled, the status block format changes
6621 * slightly. The "rx_jumbo_consumer", "reserved",
6622 * and "rx_mini_consumer" members get mapped to the
6623 * other three rx return ring producer indexes.
6627 tnapi
->rx_rcb_prod_idx
= &sblk
->idx
[0].rx_producer
;
6630 tnapi
->rx_rcb_prod_idx
= &sblk
->rx_jumbo_consumer
;
6633 tnapi
->rx_rcb_prod_idx
= &sblk
->reserved
;
6636 tnapi
->rx_rcb_prod_idx
= &sblk
->rx_mini_consumer
;
6641 * If multivector RSS is enabled, vector 0 does not handle
6642 * rx or tx interrupts. Don't allocate any resources for it.
6644 if (!i
&& tg3_flag(tp
, ENABLE_RSS
))
6647 tnapi
->rx_rcb
= dma_alloc_coherent(&tp
->pdev
->dev
,
6648 TG3_RX_RCB_RING_BYTES(tp
),
6649 &tnapi
->rx_rcb_mapping
,
6654 memset(tnapi
->rx_rcb
, 0, TG3_RX_RCB_RING_BYTES(tp
));
6660 tg3_free_consistent(tp
);
6664 #define MAX_WAIT_CNT 1000
6666 /* To stop a block, clear the enable bit and poll till it
6667 * clears. tp->lock is held.
6669 static int tg3_stop_block(struct tg3
*tp
, unsigned long ofs
, u32 enable_bit
, int silent
)
6674 if (tg3_flag(tp
, 5705_PLUS
)) {
6681 /* We can't enable/disable these bits of the
6682 * 5705/5750, just say success.
6695 for (i
= 0; i
< MAX_WAIT_CNT
; i
++) {
6698 if ((val
& enable_bit
) == 0)
6702 if (i
== MAX_WAIT_CNT
&& !silent
) {
6703 dev_err(&tp
->pdev
->dev
,
6704 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6712 /* tp->lock is held. */
6713 static int tg3_abort_hw(struct tg3
*tp
, int silent
)
6717 tg3_disable_ints(tp
);
6719 tp
->rx_mode
&= ~RX_MODE_ENABLE
;
6720 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
6723 err
= tg3_stop_block(tp
, RCVBDI_MODE
, RCVBDI_MODE_ENABLE
, silent
);
6724 err
|= tg3_stop_block(tp
, RCVLPC_MODE
, RCVLPC_MODE_ENABLE
, silent
);
6725 err
|= tg3_stop_block(tp
, RCVLSC_MODE
, RCVLSC_MODE_ENABLE
, silent
);
6726 err
|= tg3_stop_block(tp
, RCVDBDI_MODE
, RCVDBDI_MODE_ENABLE
, silent
);
6727 err
|= tg3_stop_block(tp
, RCVDCC_MODE
, RCVDCC_MODE_ENABLE
, silent
);
6728 err
|= tg3_stop_block(tp
, RCVCC_MODE
, RCVCC_MODE_ENABLE
, silent
);
6730 err
|= tg3_stop_block(tp
, SNDBDS_MODE
, SNDBDS_MODE_ENABLE
, silent
);
6731 err
|= tg3_stop_block(tp
, SNDBDI_MODE
, SNDBDI_MODE_ENABLE
, silent
);
6732 err
|= tg3_stop_block(tp
, SNDDATAI_MODE
, SNDDATAI_MODE_ENABLE
, silent
);
6733 err
|= tg3_stop_block(tp
, RDMAC_MODE
, RDMAC_MODE_ENABLE
, silent
);
6734 err
|= tg3_stop_block(tp
, SNDDATAC_MODE
, SNDDATAC_MODE_ENABLE
, silent
);
6735 err
|= tg3_stop_block(tp
, DMAC_MODE
, DMAC_MODE_ENABLE
, silent
);
6736 err
|= tg3_stop_block(tp
, SNDBDC_MODE
, SNDBDC_MODE_ENABLE
, silent
);
6738 tp
->mac_mode
&= ~MAC_MODE_TDE_ENABLE
;
6739 tw32_f(MAC_MODE
, tp
->mac_mode
);
6742 tp
->tx_mode
&= ~TX_MODE_ENABLE
;
6743 tw32_f(MAC_TX_MODE
, tp
->tx_mode
);
6745 for (i
= 0; i
< MAX_WAIT_CNT
; i
++) {
6747 if (!(tr32(MAC_TX_MODE
) & TX_MODE_ENABLE
))
6750 if (i
>= MAX_WAIT_CNT
) {
6751 dev_err(&tp
->pdev
->dev
,
6752 "%s timed out, TX_MODE_ENABLE will not clear "
6753 "MAC_TX_MODE=%08x\n", __func__
, tr32(MAC_TX_MODE
));
6757 err
|= tg3_stop_block(tp
, HOSTCC_MODE
, HOSTCC_MODE_ENABLE
, silent
);
6758 err
|= tg3_stop_block(tp
, WDMAC_MODE
, WDMAC_MODE_ENABLE
, silent
);
6759 err
|= tg3_stop_block(tp
, MBFREE_MODE
, MBFREE_MODE_ENABLE
, silent
);
6761 tw32(FTQ_RESET
, 0xffffffff);
6762 tw32(FTQ_RESET
, 0x00000000);
6764 err
|= tg3_stop_block(tp
, BUFMGR_MODE
, BUFMGR_MODE_ENABLE
, silent
);
6765 err
|= tg3_stop_block(tp
, MEMARB_MODE
, MEMARB_MODE_ENABLE
, silent
);
6767 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
6768 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
6769 if (tnapi
->hw_status
)
6770 memset(tnapi
->hw_status
, 0, TG3_HW_STATUS_SIZE
);
6773 memset(tp
->hw_stats
, 0, sizeof(struct tg3_hw_stats
));
6778 static void tg3_ape_send_event(struct tg3
*tp
, u32 event
)
6783 /* NCSI does not support APE events */
6784 if (tg3_flag(tp
, APE_HAS_NCSI
))
6787 apedata
= tg3_ape_read32(tp
, TG3_APE_SEG_SIG
);
6788 if (apedata
!= APE_SEG_SIG_MAGIC
)
6791 apedata
= tg3_ape_read32(tp
, TG3_APE_FW_STATUS
);
6792 if (!(apedata
& APE_FW_STATUS_READY
))
6795 /* Wait for up to 1 millisecond for APE to service previous event. */
6796 for (i
= 0; i
< 10; i
++) {
6797 if (tg3_ape_lock(tp
, TG3_APE_LOCK_MEM
))
6800 apedata
= tg3_ape_read32(tp
, TG3_APE_EVENT_STATUS
);
6802 if (!(apedata
& APE_EVENT_STATUS_EVENT_PENDING
))
6803 tg3_ape_write32(tp
, TG3_APE_EVENT_STATUS
,
6804 event
| APE_EVENT_STATUS_EVENT_PENDING
);
6806 tg3_ape_unlock(tp
, TG3_APE_LOCK_MEM
);
6808 if (!(apedata
& APE_EVENT_STATUS_EVENT_PENDING
))
6814 if (!(apedata
& APE_EVENT_STATUS_EVENT_PENDING
))
6815 tg3_ape_write32(tp
, TG3_APE_EVENT
, APE_EVENT_1
);
6818 static void tg3_ape_driver_state_change(struct tg3
*tp
, int kind
)
6823 if (!tg3_flag(tp
, ENABLE_APE
))
6827 case RESET_KIND_INIT
:
6828 tg3_ape_write32(tp
, TG3_APE_HOST_SEG_SIG
,
6829 APE_HOST_SEG_SIG_MAGIC
);
6830 tg3_ape_write32(tp
, TG3_APE_HOST_SEG_LEN
,
6831 APE_HOST_SEG_LEN_MAGIC
);
6832 apedata
= tg3_ape_read32(tp
, TG3_APE_HOST_INIT_COUNT
);
6833 tg3_ape_write32(tp
, TG3_APE_HOST_INIT_COUNT
, ++apedata
);
6834 tg3_ape_write32(tp
, TG3_APE_HOST_DRIVER_ID
,
6835 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM
, TG3_MIN_NUM
));
6836 tg3_ape_write32(tp
, TG3_APE_HOST_BEHAVIOR
,
6837 APE_HOST_BEHAV_NO_PHYLOCK
);
6838 tg3_ape_write32(tp
, TG3_APE_HOST_DRVR_STATE
,
6839 TG3_APE_HOST_DRVR_STATE_START
);
6841 event
= APE_EVENT_STATUS_STATE_START
;
6843 case RESET_KIND_SHUTDOWN
:
6844 /* With the interface we are currently using,
6845 * APE does not track driver state. Wiping
6846 * out the HOST SEGMENT SIGNATURE forces
6847 * the APE to assume OS absent status.
6849 tg3_ape_write32(tp
, TG3_APE_HOST_SEG_SIG
, 0x0);
6851 if (device_may_wakeup(&tp
->pdev
->dev
) &&
6852 tg3_flag(tp
, WOL_ENABLE
)) {
6853 tg3_ape_write32(tp
, TG3_APE_HOST_WOL_SPEED
,
6854 TG3_APE_HOST_WOL_SPEED_AUTO
);
6855 apedata
= TG3_APE_HOST_DRVR_STATE_WOL
;
6857 apedata
= TG3_APE_HOST_DRVR_STATE_UNLOAD
;
6859 tg3_ape_write32(tp
, TG3_APE_HOST_DRVR_STATE
, apedata
);
6861 event
= APE_EVENT_STATUS_STATE_UNLOAD
;
6863 case RESET_KIND_SUSPEND
:
6864 event
= APE_EVENT_STATUS_STATE_SUSPEND
;
6870 event
|= APE_EVENT_STATUS_DRIVER_EVNT
| APE_EVENT_STATUS_STATE_CHNGE
;
6872 tg3_ape_send_event(tp
, event
);
6875 /* tp->lock is held. */
6876 static void tg3_write_sig_pre_reset(struct tg3
*tp
, int kind
)
6878 tg3_write_mem(tp
, NIC_SRAM_FIRMWARE_MBOX
,
6879 NIC_SRAM_FIRMWARE_MBOX_MAGIC1
);
6881 if (tg3_flag(tp
, ASF_NEW_HANDSHAKE
)) {
6883 case RESET_KIND_INIT
:
6884 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
6888 case RESET_KIND_SHUTDOWN
:
6889 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
6893 case RESET_KIND_SUSPEND
:
6894 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
6903 if (kind
== RESET_KIND_INIT
||
6904 kind
== RESET_KIND_SUSPEND
)
6905 tg3_ape_driver_state_change(tp
, kind
);
6908 /* tp->lock is held. */
6909 static void tg3_write_sig_post_reset(struct tg3
*tp
, int kind
)
6911 if (tg3_flag(tp
, ASF_NEW_HANDSHAKE
)) {
6913 case RESET_KIND_INIT
:
6914 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
6915 DRV_STATE_START_DONE
);
6918 case RESET_KIND_SHUTDOWN
:
6919 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
6920 DRV_STATE_UNLOAD_DONE
);
6928 if (kind
== RESET_KIND_SHUTDOWN
)
6929 tg3_ape_driver_state_change(tp
, kind
);
6932 /* tp->lock is held. */
6933 static void tg3_write_sig_legacy(struct tg3
*tp
, int kind
)
6935 if (tg3_flag(tp
, ENABLE_ASF
)) {
6937 case RESET_KIND_INIT
:
6938 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
6942 case RESET_KIND_SHUTDOWN
:
6943 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
6947 case RESET_KIND_SUSPEND
:
6948 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
6958 static int tg3_poll_fw(struct tg3
*tp
)
6963 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
6964 /* Wait up to 20ms for init done. */
6965 for (i
= 0; i
< 200; i
++) {
6966 if (tr32(VCPU_STATUS
) & VCPU_STATUS_INIT_DONE
)
6973 /* Wait for firmware initialization to complete. */
6974 for (i
= 0; i
< 100000; i
++) {
6975 tg3_read_mem(tp
, NIC_SRAM_FIRMWARE_MBOX
, &val
);
6976 if (val
== ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1
)
6981 /* Chip might not be fitted with firmware. Some Sun onboard
6982 * parts are configured like that. So don't signal the timeout
6983 * of the above loop as an error, but do report the lack of
6984 * running firmware once.
6986 if (i
>= 100000 && !tg3_flag(tp
, NO_FWARE_REPORTED
)) {
6987 tg3_flag_set(tp
, NO_FWARE_REPORTED
);
6989 netdev_info(tp
->dev
, "No firmware running\n");
6992 if (tp
->pci_chip_rev_id
== CHIPREV_ID_57765_A0
) {
6993 /* The 57765 A0 needs a little more
6994 * time to do some important work.
7002 /* Save PCI command register before chip reset */
7003 static void tg3_save_pci_state(struct tg3
*tp
)
7005 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &tp
->pci_cmd
);
7008 /* Restore PCI state after chip reset */
7009 static void tg3_restore_pci_state(struct tg3
*tp
)
7013 /* Re-enable indirect register accesses. */
7014 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
7015 tp
->misc_host_ctrl
);
7017 /* Set MAX PCI retry to zero. */
7018 val
= (PCISTATE_ROM_ENABLE
| PCISTATE_ROM_RETRY_ENABLE
);
7019 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5704_A0
&&
7020 tg3_flag(tp
, PCIX_MODE
))
7021 val
|= PCISTATE_RETRY_SAME_DMA
;
7022 /* Allow reads and writes to the APE register and memory space. */
7023 if (tg3_flag(tp
, ENABLE_APE
))
7024 val
|= PCISTATE_ALLOW_APE_CTLSPC_WR
|
7025 PCISTATE_ALLOW_APE_SHMEM_WR
|
7026 PCISTATE_ALLOW_APE_PSPACE_WR
;
7027 pci_write_config_dword(tp
->pdev
, TG3PCI_PCISTATE
, val
);
7029 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, tp
->pci_cmd
);
7031 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5785
) {
7032 if (tg3_flag(tp
, PCI_EXPRESS
))
7033 pcie_set_readrq(tp
->pdev
, tp
->pcie_readrq
);
7035 pci_write_config_byte(tp
->pdev
, PCI_CACHE_LINE_SIZE
,
7036 tp
->pci_cacheline_sz
);
7037 pci_write_config_byte(tp
->pdev
, PCI_LATENCY_TIMER
,
7042 /* Make sure PCI-X relaxed ordering bit is clear. */
7043 if (tg3_flag(tp
, PCIX_MODE
)) {
7046 pci_read_config_word(tp
->pdev
, tp
->pcix_cap
+ PCI_X_CMD
,
7048 pcix_cmd
&= ~PCI_X_CMD_ERO
;
7049 pci_write_config_word(tp
->pdev
, tp
->pcix_cap
+ PCI_X_CMD
,
7053 if (tg3_flag(tp
, 5780_CLASS
)) {
7055 /* Chip reset on 5780 will reset MSI enable bit,
7056 * so need to restore it.
7058 if (tg3_flag(tp
, USING_MSI
)) {
7061 pci_read_config_word(tp
->pdev
,
7062 tp
->msi_cap
+ PCI_MSI_FLAGS
,
7064 pci_write_config_word(tp
->pdev
,
7065 tp
->msi_cap
+ PCI_MSI_FLAGS
,
7066 ctrl
| PCI_MSI_FLAGS_ENABLE
);
7067 val
= tr32(MSGINT_MODE
);
7068 tw32(MSGINT_MODE
, val
| MSGINT_MODE_ENABLE
);
7073 static void tg3_stop_fw(struct tg3
*);
7075 /* tp->lock is held. */
7076 static int tg3_chip_reset(struct tg3
*tp
)
7079 void (*write_op
)(struct tg3
*, u32
, u32
);
7084 tg3_ape_lock(tp
, TG3_APE_LOCK_GRC
);
7086 /* No matching tg3_nvram_unlock() after this because
7087 * chip reset below will undo the nvram lock.
7089 tp
->nvram_lock_cnt
= 0;
7091 /* GRC_MISC_CFG core clock reset will clear the memory
7092 * enable bit in PCI register 4 and the MSI enable bit
7093 * on some chips, so we save relevant registers here.
7095 tg3_save_pci_state(tp
);
7097 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
||
7098 tg3_flag(tp
, 5755_PLUS
))
7099 tw32(GRC_FASTBOOT_PC
, 0);
7102 * We must avoid the readl() that normally takes place.
7103 * It locks machines, causes machine checks, and other
7104 * fun things. So, temporarily disable the 5701
7105 * hardware workaround, while we do the reset.
7107 write_op
= tp
->write32
;
7108 if (write_op
== tg3_write_flush_reg32
)
7109 tp
->write32
= tg3_write32
;
7111 /* Prevent the irq handler from reading or writing PCI registers
7112 * during chip reset when the memory enable bit in the PCI command
7113 * register may be cleared. The chip does not generate interrupt
7114 * at this time, but the irq handler may still be called due to irq
7115 * sharing or irqpoll.
7117 tg3_flag_set(tp
, CHIP_RESETTING
);
7118 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
7119 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
7120 if (tnapi
->hw_status
) {
7121 tnapi
->hw_status
->status
= 0;
7122 tnapi
->hw_status
->status_tag
= 0;
7124 tnapi
->last_tag
= 0;
7125 tnapi
->last_irq_tag
= 0;
7129 for (i
= 0; i
< tp
->irq_cnt
; i
++)
7130 synchronize_irq(tp
->napi
[i
].irq_vec
);
7132 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
) {
7133 val
= tr32(TG3_PCIE_LNKCTL
) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN
;
7134 tw32(TG3_PCIE_LNKCTL
, val
| TG3_PCIE_LNKCTL_L1_PLL_PD_DIS
);
7138 val
= GRC_MISC_CFG_CORECLK_RESET
;
7140 if (tg3_flag(tp
, PCI_EXPRESS
)) {
7141 /* Force PCIe 1.0a mode */
7142 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5785
&&
7143 !tg3_flag(tp
, 57765_PLUS
) &&
7144 tr32(TG3_PCIE_PHY_TSTCTL
) ==
7145 (TG3_PCIE_PHY_TSTCTL_PCIE10
| TG3_PCIE_PHY_TSTCTL_PSCRAM
))
7146 tw32(TG3_PCIE_PHY_TSTCTL
, TG3_PCIE_PHY_TSTCTL_PSCRAM
);
7148 if (tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A0
) {
7149 tw32(GRC_MISC_CFG
, (1 << 29));
7154 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
7155 tw32(VCPU_STATUS
, tr32(VCPU_STATUS
) | VCPU_STATUS_DRV_RESET
);
7156 tw32(GRC_VCPU_EXT_CTRL
,
7157 tr32(GRC_VCPU_EXT_CTRL
) & ~GRC_VCPU_EXT_CTRL_HALT_CPU
);
7160 /* Manage gphy power for all CPMU absent PCIe devices. */
7161 if (tg3_flag(tp
, 5705_PLUS
) && !tg3_flag(tp
, CPMU_PRESENT
))
7162 val
|= GRC_MISC_CFG_KEEP_GPHY_POWER
;
7164 tw32(GRC_MISC_CFG
, val
);
7166 /* restore 5701 hardware bug workaround write method */
7167 tp
->write32
= write_op
;
7169 /* Unfortunately, we have to delay before the PCI read back.
7170 * Some 575X chips even will not respond to a PCI cfg access
7171 * when the reset command is given to the chip.
7173 * How do these hardware designers expect things to work
7174 * properly if the PCI write is posted for a long period
7175 * of time? It is always necessary to have some method by
7176 * which a register read back can occur to push the write
7177 * out which does the reset.
7179 * For most tg3 variants the trick below was working.
7184 /* Flush PCI posted writes. The normal MMIO registers
7185 * are inaccessible at this time so this is the only
7186 * way to make this reliably (actually, this is no longer
7187 * the case, see above). I tried to use indirect
7188 * register read/write but this upset some 5701 variants.
7190 pci_read_config_dword(tp
->pdev
, PCI_COMMAND
, &val
);
7194 if (tg3_flag(tp
, PCI_EXPRESS
) && tp
->pcie_cap
) {
7197 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5750_A0
) {
7201 /* Wait for link training to complete. */
7202 for (i
= 0; i
< 5000; i
++)
7205 pci_read_config_dword(tp
->pdev
, 0xc4, &cfg_val
);
7206 pci_write_config_dword(tp
->pdev
, 0xc4,
7207 cfg_val
| (1 << 15));
7210 /* Clear the "no snoop" and "relaxed ordering" bits. */
7211 pci_read_config_word(tp
->pdev
,
7212 tp
->pcie_cap
+ PCI_EXP_DEVCTL
,
7214 val16
&= ~(PCI_EXP_DEVCTL_RELAX_EN
|
7215 PCI_EXP_DEVCTL_NOSNOOP_EN
);
7217 * Older PCIe devices only support the 128 byte
7218 * MPS setting. Enforce the restriction.
7220 if (!tg3_flag(tp
, CPMU_PRESENT
))
7221 val16
&= ~PCI_EXP_DEVCTL_PAYLOAD
;
7222 pci_write_config_word(tp
->pdev
,
7223 tp
->pcie_cap
+ PCI_EXP_DEVCTL
,
7226 pcie_set_readrq(tp
->pdev
, tp
->pcie_readrq
);
7228 /* Clear error status */
7229 pci_write_config_word(tp
->pdev
,
7230 tp
->pcie_cap
+ PCI_EXP_DEVSTA
,
7231 PCI_EXP_DEVSTA_CED
|
7232 PCI_EXP_DEVSTA_NFED
|
7233 PCI_EXP_DEVSTA_FED
|
7234 PCI_EXP_DEVSTA_URD
);
7237 tg3_restore_pci_state(tp
);
7239 tg3_flag_clear(tp
, CHIP_RESETTING
);
7240 tg3_flag_clear(tp
, ERROR_PROCESSED
);
7243 if (tg3_flag(tp
, 5780_CLASS
))
7244 val
= tr32(MEMARB_MODE
);
7245 tw32(MEMARB_MODE
, val
| MEMARB_MODE_ENABLE
);
7247 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5750_A3
) {
7249 tw32(0x5000, 0x400);
7252 tw32(GRC_MODE
, tp
->grc_mode
);
7254 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A0
) {
7257 tw32(0xc4, val
| (1 << 15));
7260 if ((tp
->nic_sram_data_cfg
& NIC_SRAM_DATA_CFG_MINI_PCI
) != 0 &&
7261 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) {
7262 tp
->pci_clock_ctrl
|= CLOCK_CTRL_CLKRUN_OENABLE
;
7263 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A0
)
7264 tp
->pci_clock_ctrl
|= CLOCK_CTRL_FORCE_CLKRUN
;
7265 tw32(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
);
7268 if (tg3_flag(tp
, ENABLE_APE
))
7269 tp
->mac_mode
= MAC_MODE_APE_TX_EN
|
7270 MAC_MODE_APE_RX_EN
|
7271 MAC_MODE_TDE_ENABLE
;
7273 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) {
7274 tp
->mac_mode
|= MAC_MODE_PORT_MODE_TBI
;
7276 } else if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
) {
7277 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
7282 tw32_f(MAC_MODE
, val
);
7285 tg3_ape_unlock(tp
, TG3_APE_LOCK_GRC
);
7287 err
= tg3_poll_fw(tp
);
7293 if (tg3_flag(tp
, PCI_EXPRESS
) &&
7294 tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A0
&&
7295 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5785
&&
7296 !tg3_flag(tp
, 57765_PLUS
)) {
7299 tw32(0x7c00, val
| (1 << 25));
7302 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
) {
7303 val
= tr32(TG3_CPMU_CLCK_ORIDE
);
7304 tw32(TG3_CPMU_CLCK_ORIDE
, val
& ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN
);
7307 /* Reprobe ASF enable state. */
7308 tg3_flag_clear(tp
, ENABLE_ASF
);
7309 tg3_flag_clear(tp
, ASF_NEW_HANDSHAKE
);
7310 tg3_read_mem(tp
, NIC_SRAM_DATA_SIG
, &val
);
7311 if (val
== NIC_SRAM_DATA_SIG_MAGIC
) {
7314 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG
, &nic_cfg
);
7315 if (nic_cfg
& NIC_SRAM_DATA_CFG_ASF_ENABLE
) {
7316 tg3_flag_set(tp
, ENABLE_ASF
);
7317 tp
->last_event_jiffies
= jiffies
;
7318 if (tg3_flag(tp
, 5750_PLUS
))
7319 tg3_flag_set(tp
, ASF_NEW_HANDSHAKE
);
7326 /* tp->lock is held. */
7327 static void tg3_stop_fw(struct tg3
*tp
)
7329 if (tg3_flag(tp
, ENABLE_ASF
) && !tg3_flag(tp
, ENABLE_APE
)) {
7330 /* Wait for RX cpu to ACK the previous event. */
7331 tg3_wait_for_event_ack(tp
);
7333 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_MBOX
, FWCMD_NICDRV_PAUSE_FW
);
7335 tg3_generate_fw_event(tp
);
7337 /* Wait for RX cpu to ACK this event. */
7338 tg3_wait_for_event_ack(tp
);
7342 /* tp->lock is held. */
7343 static int tg3_halt(struct tg3
*tp
, int kind
, int silent
)
7349 tg3_write_sig_pre_reset(tp
, kind
);
7351 tg3_abort_hw(tp
, silent
);
7352 err
= tg3_chip_reset(tp
);
7354 __tg3_set_mac_addr(tp
, 0);
7356 tg3_write_sig_legacy(tp
, kind
);
7357 tg3_write_sig_post_reset(tp
, kind
);
7365 #define RX_CPU_SCRATCH_BASE 0x30000
7366 #define RX_CPU_SCRATCH_SIZE 0x04000
7367 #define TX_CPU_SCRATCH_BASE 0x34000
7368 #define TX_CPU_SCRATCH_SIZE 0x04000
7370 /* tp->lock is held. */
7371 static int tg3_halt_cpu(struct tg3
*tp
, u32 offset
)
7375 BUG_ON(offset
== TX_CPU_BASE
&& tg3_flag(tp
, 5705_PLUS
));
7377 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
7378 u32 val
= tr32(GRC_VCPU_EXT_CTRL
);
7380 tw32(GRC_VCPU_EXT_CTRL
, val
| GRC_VCPU_EXT_CTRL_HALT_CPU
);
7383 if (offset
== RX_CPU_BASE
) {
7384 for (i
= 0; i
< 10000; i
++) {
7385 tw32(offset
+ CPU_STATE
, 0xffffffff);
7386 tw32(offset
+ CPU_MODE
, CPU_MODE_HALT
);
7387 if (tr32(offset
+ CPU_MODE
) & CPU_MODE_HALT
)
7391 tw32(offset
+ CPU_STATE
, 0xffffffff);
7392 tw32_f(offset
+ CPU_MODE
, CPU_MODE_HALT
);
7395 for (i
= 0; i
< 10000; i
++) {
7396 tw32(offset
+ CPU_STATE
, 0xffffffff);
7397 tw32(offset
+ CPU_MODE
, CPU_MODE_HALT
);
7398 if (tr32(offset
+ CPU_MODE
) & CPU_MODE_HALT
)
7404 netdev_err(tp
->dev
, "%s timed out, %s CPU\n",
7405 __func__
, offset
== RX_CPU_BASE
? "RX" : "TX");
7409 /* Clear firmware's nvram arbitration. */
7410 if (tg3_flag(tp
, NVRAM
))
7411 tw32(NVRAM_SWARB
, SWARB_REQ_CLR0
);
7416 unsigned int fw_base
;
7417 unsigned int fw_len
;
7418 const __be32
*fw_data
;
7421 /* tp->lock is held. */
7422 static int tg3_load_firmware_cpu(struct tg3
*tp
, u32 cpu_base
, u32 cpu_scratch_base
,
7423 int cpu_scratch_size
, struct fw_info
*info
)
7425 int err
, lock_err
, i
;
7426 void (*write_op
)(struct tg3
*, u32
, u32
);
7428 if (cpu_base
== TX_CPU_BASE
&& tg3_flag(tp
, 5705_PLUS
)) {
7430 "%s: Trying to load TX cpu firmware which is 5705\n",
7435 if (tg3_flag(tp
, 5705_PLUS
))
7436 write_op
= tg3_write_mem
;
7438 write_op
= tg3_write_indirect_reg32
;
7440 /* It is possible that bootcode is still loading at this point.
7441 * Get the nvram lock first before halting the cpu.
7443 lock_err
= tg3_nvram_lock(tp
);
7444 err
= tg3_halt_cpu(tp
, cpu_base
);
7446 tg3_nvram_unlock(tp
);
7450 for (i
= 0; i
< cpu_scratch_size
; i
+= sizeof(u32
))
7451 write_op(tp
, cpu_scratch_base
+ i
, 0);
7452 tw32(cpu_base
+ CPU_STATE
, 0xffffffff);
7453 tw32(cpu_base
+ CPU_MODE
, tr32(cpu_base
+CPU_MODE
)|CPU_MODE_HALT
);
7454 for (i
= 0; i
< (info
->fw_len
/ sizeof(u32
)); i
++)
7455 write_op(tp
, (cpu_scratch_base
+
7456 (info
->fw_base
& 0xffff) +
7458 be32_to_cpu(info
->fw_data
[i
]));
7466 /* tp->lock is held. */
7467 static int tg3_load_5701_a0_firmware_fix(struct tg3
*tp
)
7469 struct fw_info info
;
7470 const __be32
*fw_data
;
7473 fw_data
= (void *)tp
->fw
->data
;
7475 /* Firmware blob starts with version numbers, followed by
7476 start address and length. We are setting complete length.
7477 length = end_address_of_bss - start_address_of_text.
7478 Remainder is the blob to be loaded contiguously
7479 from start address. */
7481 info
.fw_base
= be32_to_cpu(fw_data
[1]);
7482 info
.fw_len
= tp
->fw
->size
- 12;
7483 info
.fw_data
= &fw_data
[3];
7485 err
= tg3_load_firmware_cpu(tp
, RX_CPU_BASE
,
7486 RX_CPU_SCRATCH_BASE
, RX_CPU_SCRATCH_SIZE
,
7491 err
= tg3_load_firmware_cpu(tp
, TX_CPU_BASE
,
7492 TX_CPU_SCRATCH_BASE
, TX_CPU_SCRATCH_SIZE
,
7497 /* Now startup only the RX cpu. */
7498 tw32(RX_CPU_BASE
+ CPU_STATE
, 0xffffffff);
7499 tw32_f(RX_CPU_BASE
+ CPU_PC
, info
.fw_base
);
7501 for (i
= 0; i
< 5; i
++) {
7502 if (tr32(RX_CPU_BASE
+ CPU_PC
) == info
.fw_base
)
7504 tw32(RX_CPU_BASE
+ CPU_STATE
, 0xffffffff);
7505 tw32(RX_CPU_BASE
+ CPU_MODE
, CPU_MODE_HALT
);
7506 tw32_f(RX_CPU_BASE
+ CPU_PC
, info
.fw_base
);
7510 netdev_err(tp
->dev
, "%s fails to set RX CPU PC, is %08x "
7511 "should be %08x\n", __func__
,
7512 tr32(RX_CPU_BASE
+ CPU_PC
), info
.fw_base
);
7515 tw32(RX_CPU_BASE
+ CPU_STATE
, 0xffffffff);
7516 tw32_f(RX_CPU_BASE
+ CPU_MODE
, 0x00000000);
7521 /* tp->lock is held. */
7522 static int tg3_load_tso_firmware(struct tg3
*tp
)
7524 struct fw_info info
;
7525 const __be32
*fw_data
;
7526 unsigned long cpu_base
, cpu_scratch_base
, cpu_scratch_size
;
7529 if (tg3_flag(tp
, HW_TSO_1
) ||
7530 tg3_flag(tp
, HW_TSO_2
) ||
7531 tg3_flag(tp
, HW_TSO_3
))
7534 fw_data
= (void *)tp
->fw
->data
;
7536 /* Firmware blob starts with version numbers, followed by
7537 start address and length. We are setting complete length.
7538 length = end_address_of_bss - start_address_of_text.
7539 Remainder is the blob to be loaded contiguously
7540 from start address. */
7542 info
.fw_base
= be32_to_cpu(fw_data
[1]);
7543 cpu_scratch_size
= tp
->fw_len
;
7544 info
.fw_len
= tp
->fw
->size
- 12;
7545 info
.fw_data
= &fw_data
[3];
7547 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) {
7548 cpu_base
= RX_CPU_BASE
;
7549 cpu_scratch_base
= NIC_SRAM_MBUF_POOL_BASE5705
;
7551 cpu_base
= TX_CPU_BASE
;
7552 cpu_scratch_base
= TX_CPU_SCRATCH_BASE
;
7553 cpu_scratch_size
= TX_CPU_SCRATCH_SIZE
;
7556 err
= tg3_load_firmware_cpu(tp
, cpu_base
,
7557 cpu_scratch_base
, cpu_scratch_size
,
7562 /* Now startup the cpu. */
7563 tw32(cpu_base
+ CPU_STATE
, 0xffffffff);
7564 tw32_f(cpu_base
+ CPU_PC
, info
.fw_base
);
7566 for (i
= 0; i
< 5; i
++) {
7567 if (tr32(cpu_base
+ CPU_PC
) == info
.fw_base
)
7569 tw32(cpu_base
+ CPU_STATE
, 0xffffffff);
7570 tw32(cpu_base
+ CPU_MODE
, CPU_MODE_HALT
);
7571 tw32_f(cpu_base
+ CPU_PC
, info
.fw_base
);
7576 "%s fails to set CPU PC, is %08x should be %08x\n",
7577 __func__
, tr32(cpu_base
+ CPU_PC
), info
.fw_base
);
7580 tw32(cpu_base
+ CPU_STATE
, 0xffffffff);
7581 tw32_f(cpu_base
+ CPU_MODE
, 0x00000000);
7586 static int tg3_set_mac_addr(struct net_device
*dev
, void *p
)
7588 struct tg3
*tp
= netdev_priv(dev
);
7589 struct sockaddr
*addr
= p
;
7590 int err
= 0, skip_mac_1
= 0;
7592 if (!is_valid_ether_addr(addr
->sa_data
))
7595 memcpy(dev
->dev_addr
, addr
->sa_data
, dev
->addr_len
);
7597 if (!netif_running(dev
))
7600 if (tg3_flag(tp
, ENABLE_ASF
)) {
7601 u32 addr0_high
, addr0_low
, addr1_high
, addr1_low
;
7603 addr0_high
= tr32(MAC_ADDR_0_HIGH
);
7604 addr0_low
= tr32(MAC_ADDR_0_LOW
);
7605 addr1_high
= tr32(MAC_ADDR_1_HIGH
);
7606 addr1_low
= tr32(MAC_ADDR_1_LOW
);
7608 /* Skip MAC addr 1 if ASF is using it. */
7609 if ((addr0_high
!= addr1_high
|| addr0_low
!= addr1_low
) &&
7610 !(addr1_high
== 0 && addr1_low
== 0))
7613 spin_lock_bh(&tp
->lock
);
7614 __tg3_set_mac_addr(tp
, skip_mac_1
);
7615 spin_unlock_bh(&tp
->lock
);
7620 /* tp->lock is held. */
7621 static void tg3_set_bdinfo(struct tg3
*tp
, u32 bdinfo_addr
,
7622 dma_addr_t mapping
, u32 maxlen_flags
,
7626 (bdinfo_addr
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_HIGH
),
7627 ((u64
) mapping
>> 32));
7629 (bdinfo_addr
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_LOW
),
7630 ((u64
) mapping
& 0xffffffff));
7632 (bdinfo_addr
+ TG3_BDINFO_MAXLEN_FLAGS
),
7635 if (!tg3_flag(tp
, 5705_PLUS
))
7637 (bdinfo_addr
+ TG3_BDINFO_NIC_ADDR
),
7641 static void __tg3_set_rx_mode(struct net_device
*);
7642 static void __tg3_set_coalesce(struct tg3
*tp
, struct ethtool_coalesce
*ec
)
7646 if (!tg3_flag(tp
, ENABLE_TSS
)) {
7647 tw32(HOSTCC_TXCOL_TICKS
, ec
->tx_coalesce_usecs
);
7648 tw32(HOSTCC_TXMAX_FRAMES
, ec
->tx_max_coalesced_frames
);
7649 tw32(HOSTCC_TXCOAL_MAXF_INT
, ec
->tx_max_coalesced_frames_irq
);
7651 tw32(HOSTCC_TXCOL_TICKS
, 0);
7652 tw32(HOSTCC_TXMAX_FRAMES
, 0);
7653 tw32(HOSTCC_TXCOAL_MAXF_INT
, 0);
7656 if (!tg3_flag(tp
, ENABLE_RSS
)) {
7657 tw32(HOSTCC_RXCOL_TICKS
, ec
->rx_coalesce_usecs
);
7658 tw32(HOSTCC_RXMAX_FRAMES
, ec
->rx_max_coalesced_frames
);
7659 tw32(HOSTCC_RXCOAL_MAXF_INT
, ec
->rx_max_coalesced_frames_irq
);
7661 tw32(HOSTCC_RXCOL_TICKS
, 0);
7662 tw32(HOSTCC_RXMAX_FRAMES
, 0);
7663 tw32(HOSTCC_RXCOAL_MAXF_INT
, 0);
7666 if (!tg3_flag(tp
, 5705_PLUS
)) {
7667 u32 val
= ec
->stats_block_coalesce_usecs
;
7669 tw32(HOSTCC_RXCOAL_TICK_INT
, ec
->rx_coalesce_usecs_irq
);
7670 tw32(HOSTCC_TXCOAL_TICK_INT
, ec
->tx_coalesce_usecs_irq
);
7672 if (!netif_carrier_ok(tp
->dev
))
7675 tw32(HOSTCC_STAT_COAL_TICKS
, val
);
7678 for (i
= 0; i
< tp
->irq_cnt
- 1; i
++) {
7681 reg
= HOSTCC_RXCOL_TICKS_VEC1
+ i
* 0x18;
7682 tw32(reg
, ec
->rx_coalesce_usecs
);
7683 reg
= HOSTCC_RXMAX_FRAMES_VEC1
+ i
* 0x18;
7684 tw32(reg
, ec
->rx_max_coalesced_frames
);
7685 reg
= HOSTCC_RXCOAL_MAXF_INT_VEC1
+ i
* 0x18;
7686 tw32(reg
, ec
->rx_max_coalesced_frames_irq
);
7688 if (tg3_flag(tp
, ENABLE_TSS
)) {
7689 reg
= HOSTCC_TXCOL_TICKS_VEC1
+ i
* 0x18;
7690 tw32(reg
, ec
->tx_coalesce_usecs
);
7691 reg
= HOSTCC_TXMAX_FRAMES_VEC1
+ i
* 0x18;
7692 tw32(reg
, ec
->tx_max_coalesced_frames
);
7693 reg
= HOSTCC_TXCOAL_MAXF_INT_VEC1
+ i
* 0x18;
7694 tw32(reg
, ec
->tx_max_coalesced_frames_irq
);
7698 for (; i
< tp
->irq_max
- 1; i
++) {
7699 tw32(HOSTCC_RXCOL_TICKS_VEC1
+ i
* 0x18, 0);
7700 tw32(HOSTCC_RXMAX_FRAMES_VEC1
+ i
* 0x18, 0);
7701 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1
+ i
* 0x18, 0);
7703 if (tg3_flag(tp
, ENABLE_TSS
)) {
7704 tw32(HOSTCC_TXCOL_TICKS_VEC1
+ i
* 0x18, 0);
7705 tw32(HOSTCC_TXMAX_FRAMES_VEC1
+ i
* 0x18, 0);
7706 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1
+ i
* 0x18, 0);
7711 /* tp->lock is held. */
7712 static void tg3_rings_reset(struct tg3
*tp
)
7715 u32 stblk
, txrcb
, rxrcb
, limit
;
7716 struct tg3_napi
*tnapi
= &tp
->napi
[0];
7718 /* Disable all transmit rings but the first. */
7719 if (!tg3_flag(tp
, 5705_PLUS
))
7720 limit
= NIC_SRAM_SEND_RCB
+ TG3_BDINFO_SIZE
* 16;
7721 else if (tg3_flag(tp
, 5717_PLUS
))
7722 limit
= NIC_SRAM_SEND_RCB
+ TG3_BDINFO_SIZE
* 4;
7723 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57765
)
7724 limit
= NIC_SRAM_SEND_RCB
+ TG3_BDINFO_SIZE
* 2;
7726 limit
= NIC_SRAM_SEND_RCB
+ TG3_BDINFO_SIZE
;
7728 for (txrcb
= NIC_SRAM_SEND_RCB
+ TG3_BDINFO_SIZE
;
7729 txrcb
< limit
; txrcb
+= TG3_BDINFO_SIZE
)
7730 tg3_write_mem(tp
, txrcb
+ TG3_BDINFO_MAXLEN_FLAGS
,
7731 BDINFO_FLAGS_DISABLED
);
7734 /* Disable all receive return rings but the first. */
7735 if (tg3_flag(tp
, 5717_PLUS
))
7736 limit
= NIC_SRAM_RCV_RET_RCB
+ TG3_BDINFO_SIZE
* 17;
7737 else if (!tg3_flag(tp
, 5705_PLUS
))
7738 limit
= NIC_SRAM_RCV_RET_RCB
+ TG3_BDINFO_SIZE
* 16;
7739 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
||
7740 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57765
)
7741 limit
= NIC_SRAM_RCV_RET_RCB
+ TG3_BDINFO_SIZE
* 4;
7743 limit
= NIC_SRAM_RCV_RET_RCB
+ TG3_BDINFO_SIZE
;
7745 for (rxrcb
= NIC_SRAM_RCV_RET_RCB
+ TG3_BDINFO_SIZE
;
7746 rxrcb
< limit
; rxrcb
+= TG3_BDINFO_SIZE
)
7747 tg3_write_mem(tp
, rxrcb
+ TG3_BDINFO_MAXLEN_FLAGS
,
7748 BDINFO_FLAGS_DISABLED
);
7750 /* Disable interrupts */
7751 tw32_mailbox_f(tp
->napi
[0].int_mbox
, 1);
7752 tp
->napi
[0].chk_msi_cnt
= 0;
7753 tp
->napi
[0].last_rx_cons
= 0;
7754 tp
->napi
[0].last_tx_cons
= 0;
7756 /* Zero mailbox registers. */
7757 if (tg3_flag(tp
, SUPPORT_MSIX
)) {
7758 for (i
= 1; i
< tp
->irq_max
; i
++) {
7759 tp
->napi
[i
].tx_prod
= 0;
7760 tp
->napi
[i
].tx_cons
= 0;
7761 if (tg3_flag(tp
, ENABLE_TSS
))
7762 tw32_mailbox(tp
->napi
[i
].prodmbox
, 0);
7763 tw32_rx_mbox(tp
->napi
[i
].consmbox
, 0);
7764 tw32_mailbox_f(tp
->napi
[i
].int_mbox
, 1);
7765 tp
->napi
[0].chk_msi_cnt
= 0;
7766 tp
->napi
[i
].last_rx_cons
= 0;
7767 tp
->napi
[i
].last_tx_cons
= 0;
7769 if (!tg3_flag(tp
, ENABLE_TSS
))
7770 tw32_mailbox(tp
->napi
[0].prodmbox
, 0);
7772 tp
->napi
[0].tx_prod
= 0;
7773 tp
->napi
[0].tx_cons
= 0;
7774 tw32_mailbox(tp
->napi
[0].prodmbox
, 0);
7775 tw32_rx_mbox(tp
->napi
[0].consmbox
, 0);
7778 /* Make sure the NIC-based send BD rings are disabled. */
7779 if (!tg3_flag(tp
, 5705_PLUS
)) {
7780 u32 mbox
= MAILBOX_SNDNIC_PROD_IDX_0
+ TG3_64BIT_REG_LOW
;
7781 for (i
= 0; i
< 16; i
++)
7782 tw32_tx_mbox(mbox
+ i
* 8, 0);
7785 txrcb
= NIC_SRAM_SEND_RCB
;
7786 rxrcb
= NIC_SRAM_RCV_RET_RCB
;
7788 /* Clear status block in ram. */
7789 memset(tnapi
->hw_status
, 0, TG3_HW_STATUS_SIZE
);
7791 /* Set status block DMA address */
7792 tw32(HOSTCC_STATUS_BLK_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
7793 ((u64
) tnapi
->status_mapping
>> 32));
7794 tw32(HOSTCC_STATUS_BLK_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
7795 ((u64
) tnapi
->status_mapping
& 0xffffffff));
7797 if (tnapi
->tx_ring
) {
7798 tg3_set_bdinfo(tp
, txrcb
, tnapi
->tx_desc_mapping
,
7799 (TG3_TX_RING_SIZE
<<
7800 BDINFO_FLAGS_MAXLEN_SHIFT
),
7801 NIC_SRAM_TX_BUFFER_DESC
);
7802 txrcb
+= TG3_BDINFO_SIZE
;
7805 if (tnapi
->rx_rcb
) {
7806 tg3_set_bdinfo(tp
, rxrcb
, tnapi
->rx_rcb_mapping
,
7807 (tp
->rx_ret_ring_mask
+ 1) <<
7808 BDINFO_FLAGS_MAXLEN_SHIFT
, 0);
7809 rxrcb
+= TG3_BDINFO_SIZE
;
7812 stblk
= HOSTCC_STATBLCK_RING1
;
7814 for (i
= 1, tnapi
++; i
< tp
->irq_cnt
; i
++, tnapi
++) {
7815 u64 mapping
= (u64
)tnapi
->status_mapping
;
7816 tw32(stblk
+ TG3_64BIT_REG_HIGH
, mapping
>> 32);
7817 tw32(stblk
+ TG3_64BIT_REG_LOW
, mapping
& 0xffffffff);
7819 /* Clear status block in ram. */
7820 memset(tnapi
->hw_status
, 0, TG3_HW_STATUS_SIZE
);
7822 if (tnapi
->tx_ring
) {
7823 tg3_set_bdinfo(tp
, txrcb
, tnapi
->tx_desc_mapping
,
7824 (TG3_TX_RING_SIZE
<<
7825 BDINFO_FLAGS_MAXLEN_SHIFT
),
7826 NIC_SRAM_TX_BUFFER_DESC
);
7827 txrcb
+= TG3_BDINFO_SIZE
;
7830 tg3_set_bdinfo(tp
, rxrcb
, tnapi
->rx_rcb_mapping
,
7831 ((tp
->rx_ret_ring_mask
+ 1) <<
7832 BDINFO_FLAGS_MAXLEN_SHIFT
), 0);
7835 rxrcb
+= TG3_BDINFO_SIZE
;
7839 static void tg3_setup_rxbd_thresholds(struct tg3
*tp
)
7841 u32 val
, bdcache_maxcnt
, host_rep_thresh
, nic_rep_thresh
;
7843 if (!tg3_flag(tp
, 5750_PLUS
) ||
7844 tg3_flag(tp
, 5780_CLASS
) ||
7845 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
||
7846 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
)
7847 bdcache_maxcnt
= TG3_SRAM_RX_STD_BDCACHE_SIZE_5700
;
7848 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
||
7849 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5787
)
7850 bdcache_maxcnt
= TG3_SRAM_RX_STD_BDCACHE_SIZE_5755
;
7852 bdcache_maxcnt
= TG3_SRAM_RX_STD_BDCACHE_SIZE_5906
;
7854 nic_rep_thresh
= min(bdcache_maxcnt
/ 2, tp
->rx_std_max_post
);
7855 host_rep_thresh
= max_t(u32
, tp
->rx_pending
/ 8, 1);
7857 val
= min(nic_rep_thresh
, host_rep_thresh
);
7858 tw32(RCVBDI_STD_THRESH
, val
);
7860 if (tg3_flag(tp
, 57765_PLUS
))
7861 tw32(STD_REPLENISH_LWM
, bdcache_maxcnt
);
7863 if (!tg3_flag(tp
, JUMBO_CAPABLE
) || tg3_flag(tp
, 5780_CLASS
))
7866 if (!tg3_flag(tp
, 5705_PLUS
))
7867 bdcache_maxcnt
= TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700
;
7869 bdcache_maxcnt
= TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717
;
7871 host_rep_thresh
= max_t(u32
, tp
->rx_jumbo_pending
/ 8, 1);
7873 val
= min(bdcache_maxcnt
/ 2, host_rep_thresh
);
7874 tw32(RCVBDI_JUMBO_THRESH
, val
);
7876 if (tg3_flag(tp
, 57765_PLUS
))
7877 tw32(JMB_REPLENISH_LWM
, bdcache_maxcnt
);
7880 /* tp->lock is held. */
7881 static int tg3_reset_hw(struct tg3
*tp
, int reset_phy
)
7883 u32 val
, rdmac_mode
;
7885 struct tg3_rx_prodring_set
*tpr
= &tp
->napi
[0].prodring
;
7887 tg3_disable_ints(tp
);
7891 tg3_write_sig_pre_reset(tp
, RESET_KIND_INIT
);
7893 if (tg3_flag(tp
, INIT_COMPLETE
))
7894 tg3_abort_hw(tp
, 1);
7896 /* Enable MAC control of LPI */
7897 if (tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
) {
7898 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL
,
7899 TG3_CPMU_EEE_LNKIDL_PCIE_NL0
|
7900 TG3_CPMU_EEE_LNKIDL_UART_IDL
);
7902 tw32_f(TG3_CPMU_EEE_CTRL
,
7903 TG3_CPMU_EEE_CTRL_EXIT_20_1_US
);
7905 val
= TG3_CPMU_EEEMD_ERLY_L1_XIT_DET
|
7906 TG3_CPMU_EEEMD_LPI_IN_TX
|
7907 TG3_CPMU_EEEMD_LPI_IN_RX
|
7908 TG3_CPMU_EEEMD_EEE_ENABLE
;
7910 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5717
)
7911 val
|= TG3_CPMU_EEEMD_SND_IDX_DET_EN
;
7913 if (tg3_flag(tp
, ENABLE_APE
))
7914 val
|= TG3_CPMU_EEEMD_APE_TX_DET_EN
;
7916 tw32_f(TG3_CPMU_EEE_MODE
, val
);
7918 tw32_f(TG3_CPMU_EEE_DBTMR1
,
7919 TG3_CPMU_DBTMR1_PCIEXIT_2047US
|
7920 TG3_CPMU_DBTMR1_LNKIDLE_2047US
);
7922 tw32_f(TG3_CPMU_EEE_DBTMR2
,
7923 TG3_CPMU_DBTMR2_APE_TX_2047US
|
7924 TG3_CPMU_DBTMR2_TXIDXEQ_2047US
);
7930 err
= tg3_chip_reset(tp
);
7934 tg3_write_sig_legacy(tp
, RESET_KIND_INIT
);
7936 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5784_AX
) {
7937 val
= tr32(TG3_CPMU_CTRL
);
7938 val
&= ~(CPMU_CTRL_LINK_AWARE_MODE
| CPMU_CTRL_LINK_IDLE_MODE
);
7939 tw32(TG3_CPMU_CTRL
, val
);
7941 val
= tr32(TG3_CPMU_LSPD_10MB_CLK
);
7942 val
&= ~CPMU_LSPD_10MB_MACCLK_MASK
;
7943 val
|= CPMU_LSPD_10MB_MACCLK_6_25
;
7944 tw32(TG3_CPMU_LSPD_10MB_CLK
, val
);
7946 val
= tr32(TG3_CPMU_LNK_AWARE_PWRMD
);
7947 val
&= ~CPMU_LNK_AWARE_MACCLK_MASK
;
7948 val
|= CPMU_LNK_AWARE_MACCLK_6_25
;
7949 tw32(TG3_CPMU_LNK_AWARE_PWRMD
, val
);
7951 val
= tr32(TG3_CPMU_HST_ACC
);
7952 val
&= ~CPMU_HST_ACC_MACCLK_MASK
;
7953 val
|= CPMU_HST_ACC_MACCLK_6_25
;
7954 tw32(TG3_CPMU_HST_ACC
, val
);
7957 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
) {
7958 val
= tr32(PCIE_PWR_MGMT_THRESH
) & ~PCIE_PWR_MGMT_L1_THRESH_MSK
;
7959 val
|= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN
|
7960 PCIE_PWR_MGMT_L1_THRESH_4MS
;
7961 tw32(PCIE_PWR_MGMT_THRESH
, val
);
7963 val
= tr32(TG3_PCIE_EIDLE_DELAY
) & ~TG3_PCIE_EIDLE_DELAY_MASK
;
7964 tw32(TG3_PCIE_EIDLE_DELAY
, val
| TG3_PCIE_EIDLE_DELAY_13_CLKS
);
7966 tw32(TG3_CORR_ERR_STAT
, TG3_CORR_ERR_STAT_CLEAR
);
7968 val
= tr32(TG3_PCIE_LNKCTL
) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN
;
7969 tw32(TG3_PCIE_LNKCTL
, val
| TG3_PCIE_LNKCTL_L1_PLL_PD_DIS
);
7972 if (tg3_flag(tp
, L1PLLPD_EN
)) {
7973 u32 grc_mode
= tr32(GRC_MODE
);
7975 /* Access the lower 1K of PL PCIE block registers. */
7976 val
= grc_mode
& ~GRC_MODE_PCIE_PORT_MASK
;
7977 tw32(GRC_MODE
, val
| GRC_MODE_PCIE_PL_SEL
);
7979 val
= tr32(TG3_PCIE_TLDLPL_PORT
+ TG3_PCIE_PL_LO_PHYCTL1
);
7980 tw32(TG3_PCIE_TLDLPL_PORT
+ TG3_PCIE_PL_LO_PHYCTL1
,
7981 val
| TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN
);
7983 tw32(GRC_MODE
, grc_mode
);
7986 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57765
) {
7987 if (tp
->pci_chip_rev_id
== CHIPREV_ID_57765_A0
) {
7988 u32 grc_mode
= tr32(GRC_MODE
);
7990 /* Access the lower 1K of PL PCIE block registers. */
7991 val
= grc_mode
& ~GRC_MODE_PCIE_PORT_MASK
;
7992 tw32(GRC_MODE
, val
| GRC_MODE_PCIE_PL_SEL
);
7994 val
= tr32(TG3_PCIE_TLDLPL_PORT
+
7995 TG3_PCIE_PL_LO_PHYCTL5
);
7996 tw32(TG3_PCIE_TLDLPL_PORT
+ TG3_PCIE_PL_LO_PHYCTL5
,
7997 val
| TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ
);
7999 tw32(GRC_MODE
, grc_mode
);
8002 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) != CHIPREV_57765_AX
) {
8003 u32 grc_mode
= tr32(GRC_MODE
);
8005 /* Access the lower 1K of DL PCIE block registers. */
8006 val
= grc_mode
& ~GRC_MODE_PCIE_PORT_MASK
;
8007 tw32(GRC_MODE
, val
| GRC_MODE_PCIE_DL_SEL
);
8009 val
= tr32(TG3_PCIE_TLDLPL_PORT
+
8010 TG3_PCIE_DL_LO_FTSMAX
);
8011 val
&= ~TG3_PCIE_DL_LO_FTSMAX_MSK
;
8012 tw32(TG3_PCIE_TLDLPL_PORT
+ TG3_PCIE_DL_LO_FTSMAX
,
8013 val
| TG3_PCIE_DL_LO_FTSMAX_VAL
);
8015 tw32(GRC_MODE
, grc_mode
);
8018 val
= tr32(TG3_CPMU_LSPD_10MB_CLK
);
8019 val
&= ~CPMU_LSPD_10MB_MACCLK_MASK
;
8020 val
|= CPMU_LSPD_10MB_MACCLK_6_25
;
8021 tw32(TG3_CPMU_LSPD_10MB_CLK
, val
);
8024 /* This works around an issue with Athlon chipsets on
8025 * B3 tigon3 silicon. This bit has no effect on any
8026 * other revision. But do not set this on PCI Express
8027 * chips and don't even touch the clocks if the CPMU is present.
8029 if (!tg3_flag(tp
, CPMU_PRESENT
)) {
8030 if (!tg3_flag(tp
, PCI_EXPRESS
))
8031 tp
->pci_clock_ctrl
|= CLOCK_CTRL_DELAY_PCI_GRANT
;
8032 tw32_f(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
);
8035 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5704_A0
&&
8036 tg3_flag(tp
, PCIX_MODE
)) {
8037 val
= tr32(TG3PCI_PCISTATE
);
8038 val
|= PCISTATE_RETRY_SAME_DMA
;
8039 tw32(TG3PCI_PCISTATE
, val
);
8042 if (tg3_flag(tp
, ENABLE_APE
)) {
8043 /* Allow reads and writes to the
8044 * APE register and memory space.
8046 val
= tr32(TG3PCI_PCISTATE
);
8047 val
|= PCISTATE_ALLOW_APE_CTLSPC_WR
|
8048 PCISTATE_ALLOW_APE_SHMEM_WR
|
8049 PCISTATE_ALLOW_APE_PSPACE_WR
;
8050 tw32(TG3PCI_PCISTATE
, val
);
8053 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5704_BX
) {
8054 /* Enable some hw fixes. */
8055 val
= tr32(TG3PCI_MSI_DATA
);
8056 val
|= (1 << 26) | (1 << 28) | (1 << 29);
8057 tw32(TG3PCI_MSI_DATA
, val
);
8060 /* Descriptor ring init may make accesses to the
8061 * NIC SRAM area to setup the TX descriptors, so we
8062 * can only do this after the hardware has been
8063 * successfully reset.
8065 err
= tg3_init_rings(tp
);
8069 if (tg3_flag(tp
, 57765_PLUS
)) {
8070 val
= tr32(TG3PCI_DMA_RW_CTRL
) &
8071 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT
;
8072 if (tp
->pci_chip_rev_id
== CHIPREV_ID_57765_A0
)
8073 val
&= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK
;
8074 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_57765
&&
8075 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5717
)
8076 val
|= DMA_RWCTRL_TAGGED_STAT_WA
;
8077 tw32(TG3PCI_DMA_RW_CTRL
, val
| tp
->dma_rwctrl
);
8078 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5784
&&
8079 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5761
) {
8080 /* This value is determined during the probe time DMA
8081 * engine test, tg3_test_dma.
8083 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
8086 tp
->grc_mode
&= ~(GRC_MODE_HOST_SENDBDS
|
8087 GRC_MODE_4X_NIC_SEND_RINGS
|
8088 GRC_MODE_NO_TX_PHDR_CSUM
|
8089 GRC_MODE_NO_RX_PHDR_CSUM
);
8090 tp
->grc_mode
|= GRC_MODE_HOST_SENDBDS
;
8092 /* Pseudo-header checksum is done by hardware logic and not
8093 * the offload processers, so make the chip do the pseudo-
8094 * header checksums on receive. For transmit it is more
8095 * convenient to do the pseudo-header checksum in software
8096 * as Linux does that on transmit for us in all cases.
8098 tp
->grc_mode
|= GRC_MODE_NO_TX_PHDR_CSUM
;
8102 (GRC_MODE_IRQ_ON_MAC_ATTN
| GRC_MODE_HOST_STACKUP
));
8104 /* Setup the timer prescalar register. Clock is always 66Mhz. */
8105 val
= tr32(GRC_MISC_CFG
);
8107 val
|= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT
);
8108 tw32(GRC_MISC_CFG
, val
);
8110 /* Initialize MBUF/DESC pool. */
8111 if (tg3_flag(tp
, 5750_PLUS
)) {
8113 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5705
) {
8114 tw32(BUFMGR_MB_POOL_ADDR
, NIC_SRAM_MBUF_POOL_BASE
);
8115 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
)
8116 tw32(BUFMGR_MB_POOL_SIZE
, NIC_SRAM_MBUF_POOL_SIZE64
);
8118 tw32(BUFMGR_MB_POOL_SIZE
, NIC_SRAM_MBUF_POOL_SIZE96
);
8119 tw32(BUFMGR_DMA_DESC_POOL_ADDR
, NIC_SRAM_DMA_DESC_POOL_BASE
);
8120 tw32(BUFMGR_DMA_DESC_POOL_SIZE
, NIC_SRAM_DMA_DESC_POOL_SIZE
);
8121 } else if (tg3_flag(tp
, TSO_CAPABLE
)) {
8124 fw_len
= tp
->fw_len
;
8125 fw_len
= (fw_len
+ (0x80 - 1)) & ~(0x80 - 1);
8126 tw32(BUFMGR_MB_POOL_ADDR
,
8127 NIC_SRAM_MBUF_POOL_BASE5705
+ fw_len
);
8128 tw32(BUFMGR_MB_POOL_SIZE
,
8129 NIC_SRAM_MBUF_POOL_SIZE5705
- fw_len
- 0xa00);
8132 if (tp
->dev
->mtu
<= ETH_DATA_LEN
) {
8133 tw32(BUFMGR_MB_RDMA_LOW_WATER
,
8134 tp
->bufmgr_config
.mbuf_read_dma_low_water
);
8135 tw32(BUFMGR_MB_MACRX_LOW_WATER
,
8136 tp
->bufmgr_config
.mbuf_mac_rx_low_water
);
8137 tw32(BUFMGR_MB_HIGH_WATER
,
8138 tp
->bufmgr_config
.mbuf_high_water
);
8140 tw32(BUFMGR_MB_RDMA_LOW_WATER
,
8141 tp
->bufmgr_config
.mbuf_read_dma_low_water_jumbo
);
8142 tw32(BUFMGR_MB_MACRX_LOW_WATER
,
8143 tp
->bufmgr_config
.mbuf_mac_rx_low_water_jumbo
);
8144 tw32(BUFMGR_MB_HIGH_WATER
,
8145 tp
->bufmgr_config
.mbuf_high_water_jumbo
);
8147 tw32(BUFMGR_DMA_LOW_WATER
,
8148 tp
->bufmgr_config
.dma_low_water
);
8149 tw32(BUFMGR_DMA_HIGH_WATER
,
8150 tp
->bufmgr_config
.dma_high_water
);
8152 val
= BUFMGR_MODE_ENABLE
| BUFMGR_MODE_ATTN_ENABLE
;
8153 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
)
8154 val
|= BUFMGR_MODE_NO_TX_UNDERRUN
;
8155 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5717
||
8156 tp
->pci_chip_rev_id
== CHIPREV_ID_5719_A0
||
8157 tp
->pci_chip_rev_id
== CHIPREV_ID_5720_A0
)
8158 val
|= BUFMGR_MODE_MBLOW_ATTN_ENAB
;
8159 tw32(BUFMGR_MODE
, val
);
8160 for (i
= 0; i
< 2000; i
++) {
8161 if (tr32(BUFMGR_MODE
) & BUFMGR_MODE_ENABLE
)
8166 netdev_err(tp
->dev
, "%s cannot enable BUFMGR\n", __func__
);
8170 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5906_A1
)
8171 tw32(ISO_PKT_TX
, (tr32(ISO_PKT_TX
) & ~0x3) | 0x2);
8173 tg3_setup_rxbd_thresholds(tp
);
8175 /* Initialize TG3_BDINFO's at:
8176 * RCVDBDI_STD_BD: standard eth size rx ring
8177 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8178 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8181 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8182 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8183 * ring attribute flags
8184 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8186 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8187 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8189 * The size of each ring is fixed in the firmware, but the location is
8192 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
8193 ((u64
) tpr
->rx_std_mapping
>> 32));
8194 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
8195 ((u64
) tpr
->rx_std_mapping
& 0xffffffff));
8196 if (!tg3_flag(tp
, 5717_PLUS
))
8197 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_NIC_ADDR
,
8198 NIC_SRAM_RX_BUFFER_DESC
);
8200 /* Disable the mini ring */
8201 if (!tg3_flag(tp
, 5705_PLUS
))
8202 tw32(RCVDBDI_MINI_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
8203 BDINFO_FLAGS_DISABLED
);
8205 /* Program the jumbo buffer descriptor ring control
8206 * blocks on those devices that have them.
8208 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
||
8209 (tg3_flag(tp
, JUMBO_CAPABLE
) && !tg3_flag(tp
, 5780_CLASS
))) {
8211 if (tg3_flag(tp
, JUMBO_RING_ENABLE
)) {
8212 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
8213 ((u64
) tpr
->rx_jmb_mapping
>> 32));
8214 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
8215 ((u64
) tpr
->rx_jmb_mapping
& 0xffffffff));
8216 val
= TG3_RX_JMB_RING_SIZE(tp
) <<
8217 BDINFO_FLAGS_MAXLEN_SHIFT
;
8218 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
8219 val
| BDINFO_FLAGS_USE_EXT_RECV
);
8220 if (!tg3_flag(tp
, USE_JUMBO_BDFLAG
) ||
8221 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57765
)
8222 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_NIC_ADDR
,
8223 NIC_SRAM_RX_JUMBO_BUFFER_DESC
);
8225 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
8226 BDINFO_FLAGS_DISABLED
);
8229 if (tg3_flag(tp
, 57765_PLUS
)) {
8230 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57765
)
8231 val
= TG3_RX_STD_MAX_SIZE_5700
;
8233 val
= TG3_RX_STD_MAX_SIZE_5717
;
8234 val
<<= BDINFO_FLAGS_MAXLEN_SHIFT
;
8235 val
|= (TG3_RX_STD_DMA_SZ
<< 2);
8237 val
= TG3_RX_STD_DMA_SZ
<< BDINFO_FLAGS_MAXLEN_SHIFT
;
8239 val
= TG3_RX_STD_MAX_SIZE_5700
<< BDINFO_FLAGS_MAXLEN_SHIFT
;
8241 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_MAXLEN_FLAGS
, val
);
8243 tpr
->rx_std_prod_idx
= tp
->rx_pending
;
8244 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG
, tpr
->rx_std_prod_idx
);
8246 tpr
->rx_jmb_prod_idx
=
8247 tg3_flag(tp
, JUMBO_RING_ENABLE
) ? tp
->rx_jumbo_pending
: 0;
8248 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG
, tpr
->rx_jmb_prod_idx
);
8250 tg3_rings_reset(tp
);
8252 /* Initialize MAC address and backoff seed. */
8253 __tg3_set_mac_addr(tp
, 0);
8255 /* MTU + ethernet header + FCS + optional VLAN tag */
8256 tw32(MAC_RX_MTU_SIZE
,
8257 tp
->dev
->mtu
+ ETH_HLEN
+ ETH_FCS_LEN
+ VLAN_HLEN
);
8259 /* The slot time is changed by tg3_setup_phy if we
8260 * run at gigabit with half duplex.
8262 val
= (2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
8263 (6 << TX_LENGTHS_IPG_SHIFT
) |
8264 (32 << TX_LENGTHS_SLOT_TIME_SHIFT
);
8266 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
)
8267 val
|= tr32(MAC_TX_LENGTHS
) &
8268 (TX_LENGTHS_JMB_FRM_LEN_MSK
|
8269 TX_LENGTHS_CNT_DWN_VAL_MSK
);
8271 tw32(MAC_TX_LENGTHS
, val
);
8273 /* Receive rules. */
8274 tw32(MAC_RCV_RULE_CFG
, RCV_RULE_CFG_DEFAULT_CLASS
);
8275 tw32(RCVLPC_CONFIG
, 0x0181);
8277 /* Calculate RDMAC_MODE setting early, we need it to determine
8278 * the RCVLPC_STATE_ENABLE mask.
8280 rdmac_mode
= (RDMAC_MODE_ENABLE
| RDMAC_MODE_TGTABORT_ENAB
|
8281 RDMAC_MODE_MSTABORT_ENAB
| RDMAC_MODE_PARITYERR_ENAB
|
8282 RDMAC_MODE_ADDROFLOW_ENAB
| RDMAC_MODE_FIFOOFLOW_ENAB
|
8283 RDMAC_MODE_FIFOURUN_ENAB
| RDMAC_MODE_FIFOOREAD_ENAB
|
8284 RDMAC_MODE_LNGREAD_ENAB
);
8286 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5717
)
8287 rdmac_mode
|= RDMAC_MODE_MULT_DMA_RD_DIS
;
8289 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
||
8290 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
||
8291 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
)
8292 rdmac_mode
|= RDMAC_MODE_BD_SBD_CRPT_ENAB
|
8293 RDMAC_MODE_MBUF_RBD_CRPT_ENAB
|
8294 RDMAC_MODE_MBUF_SBD_CRPT_ENAB
;
8296 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
&&
8297 tp
->pci_chip_rev_id
!= CHIPREV_ID_5705_A0
) {
8298 if (tg3_flag(tp
, TSO_CAPABLE
) &&
8299 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) {
8300 rdmac_mode
|= RDMAC_MODE_FIFO_SIZE_128
;
8301 } else if (!(tr32(TG3PCI_PCISTATE
) & PCISTATE_BUS_SPEED_HIGH
) &&
8302 !tg3_flag(tp
, IS_5788
)) {
8303 rdmac_mode
|= RDMAC_MODE_FIFO_LONG_BURST
;
8307 if (tg3_flag(tp
, PCI_EXPRESS
))
8308 rdmac_mode
|= RDMAC_MODE_FIFO_LONG_BURST
;
8310 if (tg3_flag(tp
, HW_TSO_1
) ||
8311 tg3_flag(tp
, HW_TSO_2
) ||
8312 tg3_flag(tp
, HW_TSO_3
))
8313 rdmac_mode
|= RDMAC_MODE_IPV4_LSO_EN
;
8315 if (tg3_flag(tp
, 57765_PLUS
) ||
8316 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
||
8317 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
)
8318 rdmac_mode
|= RDMAC_MODE_IPV6_LSO_EN
;
8320 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
)
8321 rdmac_mode
|= tr32(RDMAC_MODE
) & RDMAC_MODE_H2BNC_VLAN_DET
;
8323 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
||
8324 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
||
8325 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
||
8326 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
||
8327 tg3_flag(tp
, 57765_PLUS
)) {
8328 val
= tr32(TG3_RDMA_RSRVCTRL_REG
);
8329 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
||
8330 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
) {
8331 val
&= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK
|
8332 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK
|
8333 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK
);
8334 val
|= TG3_RDMA_RSRVCTRL_TXMRGN_320B
|
8335 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K
|
8336 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K
;
8338 tw32(TG3_RDMA_RSRVCTRL_REG
,
8339 val
| TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX
);
8342 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
||
8343 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
) {
8344 val
= tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL
);
8345 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL
, val
|
8346 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K
|
8347 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K
);
8350 /* Receive/send statistics. */
8351 if (tg3_flag(tp
, 5750_PLUS
)) {
8352 val
= tr32(RCVLPC_STATS_ENABLE
);
8353 val
&= ~RCVLPC_STATSENAB_DACK_FIX
;
8354 tw32(RCVLPC_STATS_ENABLE
, val
);
8355 } else if ((rdmac_mode
& RDMAC_MODE_FIFO_SIZE_128
) &&
8356 tg3_flag(tp
, TSO_CAPABLE
)) {
8357 val
= tr32(RCVLPC_STATS_ENABLE
);
8358 val
&= ~RCVLPC_STATSENAB_LNGBRST_RFIX
;
8359 tw32(RCVLPC_STATS_ENABLE
, val
);
8361 tw32(RCVLPC_STATS_ENABLE
, 0xffffff);
8363 tw32(RCVLPC_STATSCTRL
, RCVLPC_STATSCTRL_ENABLE
);
8364 tw32(SNDDATAI_STATSENAB
, 0xffffff);
8365 tw32(SNDDATAI_STATSCTRL
,
8366 (SNDDATAI_SCTRL_ENABLE
|
8367 SNDDATAI_SCTRL_FASTUPD
));
8369 /* Setup host coalescing engine. */
8370 tw32(HOSTCC_MODE
, 0);
8371 for (i
= 0; i
< 2000; i
++) {
8372 if (!(tr32(HOSTCC_MODE
) & HOSTCC_MODE_ENABLE
))
8377 __tg3_set_coalesce(tp
, &tp
->coal
);
8379 if (!tg3_flag(tp
, 5705_PLUS
)) {
8380 /* Status/statistics block address. See tg3_timer,
8381 * the tg3_periodic_fetch_stats call there, and
8382 * tg3_get_stats to see how this works for 5705/5750 chips.
8384 tw32(HOSTCC_STATS_BLK_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
8385 ((u64
) tp
->stats_mapping
>> 32));
8386 tw32(HOSTCC_STATS_BLK_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
8387 ((u64
) tp
->stats_mapping
& 0xffffffff));
8388 tw32(HOSTCC_STATS_BLK_NIC_ADDR
, NIC_SRAM_STATS_BLK
);
8390 tw32(HOSTCC_STATUS_BLK_NIC_ADDR
, NIC_SRAM_STATUS_BLK
);
8392 /* Clear statistics and status block memory areas */
8393 for (i
= NIC_SRAM_STATS_BLK
;
8394 i
< NIC_SRAM_STATUS_BLK
+ TG3_HW_STATUS_SIZE
;
8396 tg3_write_mem(tp
, i
, 0);
8401 tw32(HOSTCC_MODE
, HOSTCC_MODE_ENABLE
| tp
->coalesce_mode
);
8403 tw32(RCVCC_MODE
, RCVCC_MODE_ENABLE
| RCVCC_MODE_ATTN_ENABLE
);
8404 tw32(RCVLPC_MODE
, RCVLPC_MODE_ENABLE
);
8405 if (!tg3_flag(tp
, 5705_PLUS
))
8406 tw32(RCVLSC_MODE
, RCVLSC_MODE_ENABLE
| RCVLSC_MODE_ATTN_ENABLE
);
8408 if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
) {
8409 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
8410 /* reset to prevent losing 1st rx packet intermittently */
8411 tw32_f(MAC_RX_MODE
, RX_MODE_RESET
);
8415 if (tg3_flag(tp
, ENABLE_APE
))
8416 tp
->mac_mode
= MAC_MODE_APE_TX_EN
| MAC_MODE_APE_RX_EN
;
8419 tp
->mac_mode
|= MAC_MODE_TXSTAT_ENABLE
| MAC_MODE_RXSTAT_ENABLE
|
8420 MAC_MODE_TDE_ENABLE
| MAC_MODE_RDE_ENABLE
| MAC_MODE_FHDE_ENABLE
;
8421 if (!tg3_flag(tp
, 5705_PLUS
) &&
8422 !(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) &&
8423 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
)
8424 tp
->mac_mode
|= MAC_MODE_LINK_POLARITY
;
8425 tw32_f(MAC_MODE
, tp
->mac_mode
| MAC_MODE_RXSTAT_CLEAR
| MAC_MODE_TXSTAT_CLEAR
);
8428 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8429 * If TG3_FLAG_IS_NIC is zero, we should read the
8430 * register to preserve the GPIO settings for LOMs. The GPIOs,
8431 * whether used as inputs or outputs, are set by boot code after
8434 if (!tg3_flag(tp
, IS_NIC
)) {
8437 gpio_mask
= GRC_LCLCTRL_GPIO_OE0
| GRC_LCLCTRL_GPIO_OE1
|
8438 GRC_LCLCTRL_GPIO_OE2
| GRC_LCLCTRL_GPIO_OUTPUT0
|
8439 GRC_LCLCTRL_GPIO_OUTPUT1
| GRC_LCLCTRL_GPIO_OUTPUT2
;
8441 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
)
8442 gpio_mask
|= GRC_LCLCTRL_GPIO_OE3
|
8443 GRC_LCLCTRL_GPIO_OUTPUT3
;
8445 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
)
8446 gpio_mask
|= GRC_LCLCTRL_GPIO_UART_SEL
;
8448 tp
->grc_local_ctrl
&= ~gpio_mask
;
8449 tp
->grc_local_ctrl
|= tr32(GRC_LOCAL_CTRL
) & gpio_mask
;
8451 /* GPIO1 must be driven high for eeprom write protect */
8452 if (tg3_flag(tp
, EEPROM_WRITE_PROT
))
8453 tp
->grc_local_ctrl
|= (GRC_LCLCTRL_GPIO_OE1
|
8454 GRC_LCLCTRL_GPIO_OUTPUT1
);
8456 tw32_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
);
8459 if (tg3_flag(tp
, USING_MSIX
) && tp
->irq_cnt
> 1) {
8460 val
= tr32(MSGINT_MODE
);
8461 val
|= MSGINT_MODE_MULTIVEC_EN
| MSGINT_MODE_ENABLE
;
8462 tw32(MSGINT_MODE
, val
);
8465 if (!tg3_flag(tp
, 5705_PLUS
)) {
8466 tw32_f(DMAC_MODE
, DMAC_MODE_ENABLE
);
8470 val
= (WDMAC_MODE_ENABLE
| WDMAC_MODE_TGTABORT_ENAB
|
8471 WDMAC_MODE_MSTABORT_ENAB
| WDMAC_MODE_PARITYERR_ENAB
|
8472 WDMAC_MODE_ADDROFLOW_ENAB
| WDMAC_MODE_FIFOOFLOW_ENAB
|
8473 WDMAC_MODE_FIFOURUN_ENAB
| WDMAC_MODE_FIFOOREAD_ENAB
|
8474 WDMAC_MODE_LNGREAD_ENAB
);
8476 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
&&
8477 tp
->pci_chip_rev_id
!= CHIPREV_ID_5705_A0
) {
8478 if (tg3_flag(tp
, TSO_CAPABLE
) &&
8479 (tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A1
||
8480 tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A2
)) {
8482 } else if (!(tr32(TG3PCI_PCISTATE
) & PCISTATE_BUS_SPEED_HIGH
) &&
8483 !tg3_flag(tp
, IS_5788
)) {
8484 val
|= WDMAC_MODE_RX_ACCEL
;
8488 /* Enable host coalescing bug fix */
8489 if (tg3_flag(tp
, 5755_PLUS
))
8490 val
|= WDMAC_MODE_STATUS_TAG_FIX
;
8492 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
)
8493 val
|= WDMAC_MODE_BURST_ALL_DATA
;
8495 tw32_f(WDMAC_MODE
, val
);
8498 if (tg3_flag(tp
, PCIX_MODE
)) {
8501 pci_read_config_word(tp
->pdev
, tp
->pcix_cap
+ PCI_X_CMD
,
8503 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
) {
8504 pcix_cmd
&= ~PCI_X_CMD_MAX_READ
;
8505 pcix_cmd
|= PCI_X_CMD_READ_2K
;
8506 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) {
8507 pcix_cmd
&= ~(PCI_X_CMD_MAX_SPLIT
| PCI_X_CMD_MAX_READ
);
8508 pcix_cmd
|= PCI_X_CMD_READ_2K
;
8510 pci_write_config_word(tp
->pdev
, tp
->pcix_cap
+ PCI_X_CMD
,
8514 tw32_f(RDMAC_MODE
, rdmac_mode
);
8517 tw32(RCVDCC_MODE
, RCVDCC_MODE_ENABLE
| RCVDCC_MODE_ATTN_ENABLE
);
8518 if (!tg3_flag(tp
, 5705_PLUS
))
8519 tw32(MBFREE_MODE
, MBFREE_MODE_ENABLE
);
8521 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
)
8523 SNDDATAC_MODE_ENABLE
| SNDDATAC_MODE_CDELAY
);
8525 tw32(SNDDATAC_MODE
, SNDDATAC_MODE_ENABLE
);
8527 tw32(SNDBDC_MODE
, SNDBDC_MODE_ENABLE
| SNDBDC_MODE_ATTN_ENABLE
);
8528 tw32(RCVBDI_MODE
, RCVBDI_MODE_ENABLE
| RCVBDI_MODE_RCB_ATTN_ENAB
);
8529 val
= RCVDBDI_MODE_ENABLE
| RCVDBDI_MODE_INV_RING_SZ
;
8530 if (tg3_flag(tp
, LRG_PROD_RING_CAP
))
8531 val
|= RCVDBDI_MODE_LRG_RING_SZ
;
8532 tw32(RCVDBDI_MODE
, val
);
8533 tw32(SNDDATAI_MODE
, SNDDATAI_MODE_ENABLE
);
8534 if (tg3_flag(tp
, HW_TSO_1
) ||
8535 tg3_flag(tp
, HW_TSO_2
) ||
8536 tg3_flag(tp
, HW_TSO_3
))
8537 tw32(SNDDATAI_MODE
, SNDDATAI_MODE_ENABLE
| 0x8);
8538 val
= SNDBDI_MODE_ENABLE
| SNDBDI_MODE_ATTN_ENABLE
;
8539 if (tg3_flag(tp
, ENABLE_TSS
))
8540 val
|= SNDBDI_MODE_MULTI_TXQ_EN
;
8541 tw32(SNDBDI_MODE
, val
);
8542 tw32(SNDBDS_MODE
, SNDBDS_MODE_ENABLE
| SNDBDS_MODE_ATTN_ENABLE
);
8544 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
) {
8545 err
= tg3_load_5701_a0_firmware_fix(tp
);
8550 if (tg3_flag(tp
, TSO_CAPABLE
)) {
8551 err
= tg3_load_tso_firmware(tp
);
8556 tp
->tx_mode
= TX_MODE_ENABLE
;
8558 if (tg3_flag(tp
, 5755_PLUS
) ||
8559 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
)
8560 tp
->tx_mode
|= TX_MODE_MBUF_LOCKUP_FIX
;
8562 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
) {
8563 val
= TX_MODE_JMB_FRM_LEN
| TX_MODE_CNT_DN_MODE
;
8564 tp
->tx_mode
&= ~val
;
8565 tp
->tx_mode
|= tr32(MAC_TX_MODE
) & val
;
8568 tw32_f(MAC_TX_MODE
, tp
->tx_mode
);
8571 if (tg3_flag(tp
, ENABLE_RSS
)) {
8572 u32 reg
= MAC_RSS_INDIR_TBL_0
;
8573 u8
*ent
= (u8
*)&val
;
8575 /* Setup the indirection table */
8576 for (i
= 0; i
< TG3_RSS_INDIR_TBL_SIZE
; i
++) {
8577 int idx
= i
% sizeof(val
);
8579 ent
[idx
] = i
% (tp
->irq_cnt
- 1);
8580 if (idx
== sizeof(val
) - 1) {
8586 /* Setup the "secret" hash key. */
8587 tw32(MAC_RSS_HASH_KEY_0
, 0x5f865437);
8588 tw32(MAC_RSS_HASH_KEY_1
, 0xe4ac62cc);
8589 tw32(MAC_RSS_HASH_KEY_2
, 0x50103a45);
8590 tw32(MAC_RSS_HASH_KEY_3
, 0x36621985);
8591 tw32(MAC_RSS_HASH_KEY_4
, 0xbf14c0e8);
8592 tw32(MAC_RSS_HASH_KEY_5
, 0x1bc27a1e);
8593 tw32(MAC_RSS_HASH_KEY_6
, 0x84f4b556);
8594 tw32(MAC_RSS_HASH_KEY_7
, 0x094ea6fe);
8595 tw32(MAC_RSS_HASH_KEY_8
, 0x7dda01e7);
8596 tw32(MAC_RSS_HASH_KEY_9
, 0xc04d7481);
8599 tp
->rx_mode
= RX_MODE_ENABLE
;
8600 if (tg3_flag(tp
, 5755_PLUS
))
8601 tp
->rx_mode
|= RX_MODE_IPV6_CSUM_ENABLE
;
8603 if (tg3_flag(tp
, ENABLE_RSS
))
8604 tp
->rx_mode
|= RX_MODE_RSS_ENABLE
|
8605 RX_MODE_RSS_ITBL_HASH_BITS_7
|
8606 RX_MODE_RSS_IPV6_HASH_EN
|
8607 RX_MODE_RSS_TCP_IPV6_HASH_EN
|
8608 RX_MODE_RSS_IPV4_HASH_EN
|
8609 RX_MODE_RSS_TCP_IPV4_HASH_EN
;
8611 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
8614 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
8616 tw32(MAC_MI_STAT
, MAC_MI_STAT_LNKSTAT_ATTN_ENAB
);
8617 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) {
8618 tw32_f(MAC_RX_MODE
, RX_MODE_RESET
);
8621 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
8624 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) {
8625 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) &&
8626 !(tp
->phy_flags
& TG3_PHYFLG_SERDES_PREEMPHASIS
)) {
8627 /* Set drive transmission level to 1.2V */
8628 /* only if the signal pre-emphasis bit is not set */
8629 val
= tr32(MAC_SERDES_CFG
);
8632 tw32(MAC_SERDES_CFG
, val
);
8634 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5703_A1
)
8635 tw32(MAC_SERDES_CFG
, 0x616000);
8638 /* Prevent chip from dropping frames when flow control
8641 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57765
)
8645 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME
, val
);
8647 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
&&
8648 (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)) {
8649 /* Use hardware link auto-negotiation */
8650 tg3_flag_set(tp
, HW_AUTONEG
);
8653 if ((tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
) &&
8654 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
) {
8657 tmp
= tr32(SERDES_RX_CTRL
);
8658 tw32(SERDES_RX_CTRL
, tmp
| SERDES_RX_SIG_DETECT
);
8659 tp
->grc_local_ctrl
&= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT
;
8660 tp
->grc_local_ctrl
|= GRC_LCLCTRL_USE_SIG_DETECT
;
8661 tw32(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
);
8664 if (!tg3_flag(tp
, USE_PHYLIB
)) {
8665 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
) {
8666 tp
->phy_flags
&= ~TG3_PHYFLG_IS_LOW_POWER
;
8667 tp
->link_config
.speed
= tp
->link_config
.orig_speed
;
8668 tp
->link_config
.duplex
= tp
->link_config
.orig_duplex
;
8669 tp
->link_config
.autoneg
= tp
->link_config
.orig_autoneg
;
8672 err
= tg3_setup_phy(tp
, 0);
8676 if (!(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) &&
8677 !(tp
->phy_flags
& TG3_PHYFLG_IS_FET
)) {
8680 /* Clear CRC stats. */
8681 if (!tg3_readphy(tp
, MII_TG3_TEST1
, &tmp
)) {
8682 tg3_writephy(tp
, MII_TG3_TEST1
,
8683 tmp
| MII_TG3_TEST1_CRC_EN
);
8684 tg3_readphy(tp
, MII_TG3_RXR_COUNTERS
, &tmp
);
8689 __tg3_set_rx_mode(tp
->dev
);
8691 /* Initialize receive rules. */
8692 tw32(MAC_RCV_RULE_0
, 0xc2000000 & RCV_RULE_DISABLE_MASK
);
8693 tw32(MAC_RCV_VALUE_0
, 0xffffffff & RCV_RULE_DISABLE_MASK
);
8694 tw32(MAC_RCV_RULE_1
, 0x86000004 & RCV_RULE_DISABLE_MASK
);
8695 tw32(MAC_RCV_VALUE_1
, 0xffffffff & RCV_RULE_DISABLE_MASK
);
8697 if (tg3_flag(tp
, 5705_PLUS
) && !tg3_flag(tp
, 5780_CLASS
))
8701 if (tg3_flag(tp
, ENABLE_ASF
))
8705 tw32(MAC_RCV_RULE_15
, 0); tw32(MAC_RCV_VALUE_15
, 0);
8707 tw32(MAC_RCV_RULE_14
, 0); tw32(MAC_RCV_VALUE_14
, 0);
8709 tw32(MAC_RCV_RULE_13
, 0); tw32(MAC_RCV_VALUE_13
, 0);
8711 tw32(MAC_RCV_RULE_12
, 0); tw32(MAC_RCV_VALUE_12
, 0);
8713 tw32(MAC_RCV_RULE_11
, 0); tw32(MAC_RCV_VALUE_11
, 0);
8715 tw32(MAC_RCV_RULE_10
, 0); tw32(MAC_RCV_VALUE_10
, 0);
8717 tw32(MAC_RCV_RULE_9
, 0); tw32(MAC_RCV_VALUE_9
, 0);
8719 tw32(MAC_RCV_RULE_8
, 0); tw32(MAC_RCV_VALUE_8
, 0);
8721 tw32(MAC_RCV_RULE_7
, 0); tw32(MAC_RCV_VALUE_7
, 0);
8723 tw32(MAC_RCV_RULE_6
, 0); tw32(MAC_RCV_VALUE_6
, 0);
8725 tw32(MAC_RCV_RULE_5
, 0); tw32(MAC_RCV_VALUE_5
, 0);
8727 tw32(MAC_RCV_RULE_4
, 0); tw32(MAC_RCV_VALUE_4
, 0);
8729 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
8731 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
8739 if (tg3_flag(tp
, ENABLE_APE
))
8740 /* Write our heartbeat update interval to APE. */
8741 tg3_ape_write32(tp
, TG3_APE_HOST_HEARTBEAT_INT_MS
,
8742 APE_HOST_HEARTBEAT_INT_DISABLE
);
8744 tg3_write_sig_post_reset(tp
, RESET_KIND_INIT
);
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
	tg3_switch_clocks(tp);

	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}

#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
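
/* Note on TG3_STAT_ADD32: the MAC exposes 32-bit counters that wrap, so the
 * driver accumulates them into 64-bit (high/low) software copies.  Adding the
 * fresh 32-bit reading to ->low and bumping ->high whenever ->low ends up
 * smaller than the value just added is the usual carry-detection trick.  For
 * example, with ->low = 0xfffffff0 and a new reading of 0x20, ->low becomes
 * 0x10 (which is < 0x20), so ->high is incremented by one.
 */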
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
static void tg3_chk_missed_msi(struct tg3 *tp)
{
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_has_work(tnapi)) {
			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
			    tnapi->last_tx_cons == tnapi->tx_cons) {
				if (tnapi->chk_msi_cnt < 1) {
					tnapi->chk_msi_cnt++;
					return;
				}
				tw32_mailbox(tnapi->int_mbox,
					     tnapi->last_tag << 24);
			}
		}
		tnapi->chk_msi_cnt = 0;
		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
		tnapi->last_tx_cons = tnapi->tx_cons;
	}
}
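
/* What tg3_chk_missed_msi() is guarding against: an MSI/MSI-X interrupt can
 * occasionally be lost on the affected chips.  If a vector still has work
 * pending and neither its rx nor tx consumer index has moved since the
 * previous timer tick, the driver rewrites the interrupt mailbox with the
 * last status tag, prodding the chip into re-asserting the interrupt.
 */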
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	spin_lock(&tp->lock);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		tg3_chk_missed_msi(tp);

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			tg3_flag_set(tp, RESTART_TIMER);
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					       (tp->mac_mode &
						~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
static int tg3_request_irq(struct tg3 *tp, int irq_num)
{
	irq_handler_t fn;
	unsigned long flags;
	char *name;
	struct tg3_napi *tnapi = &tp->napi[irq_num];

	if (tp->irq_cnt == 1)
		name = tp->dev->name;
	else {
		name = &tnapi->irq_lbl[0];
		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
		name[IFNAMSIZ-1] = 0;
	}

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		fn = tg3_msi;
		if (tg3_flag(tp, 1SHOT_MSI))
			fn = tg3_msi_1shot;
		flags = 0;
	} else {
		fn = tg3_interrupt;
		if (tg3_flag(tp, TAGGED_STATUS))
			fn = tg3_interrupt_tagged;
		flags = IRQF_SHARED;
	}

	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
}
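
/* Naming convention used above: with a single vector the IRQ simply borrows
 * the netdev name (for example "eth0"); with multiple MSI-X vectors each one
 * gets a per-vector label such as "eth0-0", "eth0-1", ... built by the
 * snprintf() into tnapi->irq_lbl, so the vectors can be told apart in
 * /proc/interrupts.
 */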
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
static int tg3_request_firmware(struct tg3 *tp)
{
	const __be32 *fw_data;

	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
			   tp->fw_needed);
		return -ENOENT;
	}

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course
	 */
	tp->fw_len = be32_to_cpu(fw_data[2]);	/* includes bss */
	if (tp->fw_len < (tp->fw->size - 12)) {
		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
			   tp->fw_len, tp->fw_needed);
		release_firmware(tp->fw);
		return -EINVAL;
	}

	/* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
	return 0;
}
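
/* Sketch of the blob layout the check above relies on, restating the comment
 * in tg3_request_firmware(): the image begins with three big-endian 32-bit
 * words - version information, the start address, and the total length
 * including BSS - followed by the code itself.  That is why fw_data[2] is
 * taken as the length and why the payload after that 12-byte header may
 * legitimately be shorter than tp->fw_len.
 */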
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc, cpus = num_online_cpus();
	struct msix_entry msix_ent[tp->irq_max];

	if (cpus == 1)
		/* Just fallback to the simpler MSI mode. */
		return false;

	/*
	 * We want as many rx rings enabled as there are cpus.
	 * The first MSIX vector only deals with link interrupts, etc,
	 * so we add one to the number of vectors we are requesting.
	 */
	tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	netif_set_real_num_tx_queues(tp->dev, 1);
	rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
	if (netif_set_real_num_rx_queues(tp->dev, rc)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	if (tp->irq_cnt > 1) {
		tg3_flag_set(tp, ENABLE_RSS);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
			tg3_flag_set(tp, ENABLE_TSS);
			netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
		}
	}

	return true;
}
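
/* Worked example of the vector accounting above: on a machine with 4 online
 * CPUs and tp->irq_max >= 5, the driver asks for min(4 + 1, irq_max) = 5
 * MSI-X vectors - vector 0 for link/status work and vectors 1-4 for the rx
 * rings - and then sizes the real rx queue count to irq_cnt - 1 = 4.
 */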
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		tp->napi[0].irq_vec = tp->pdev->irq;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}

static void tg3_ints_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, USING_MSIX))
		pci_disable_msix(tp->pdev);
	else if (tg3_flag(tp, USING_MSI))
		pci_disable_msi(tp->pdev);
	tg3_flag_clear(tp, USING_MSI);
	tg3_flag_clear(tp, USING_MSIX);
	tg3_flag_clear(tp, ENABLE_RSS);
	tg3_flag_clear(tp, ENABLE_TSS);
}
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err, i;

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
			if (err)
				return err;
		} else if (err) {
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	netif_carrier_off(tp->dev);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto err_out1;

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			for (i--; i >= 0; i--)
				free_irq(tnapi->irq_vec, tnapi);
			goto err_out2;
		}
	}

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		if (tg3_flag(tp, TAGGED_STATUS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);
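
		/* Concrete cadence implied by the arithmetic above: with
		 * timer_offset = HZ / 10 the timer fires ten times a second,
		 * timer_counter/timer_multiplier become 10 (so the "once per
		 * second" block in tg3_timer() runs every 10th tick), and
		 * asf_counter/asf_multiplier become 20, i.e. the ASF
		 * heartbeat goes out every 2 seconds.
		 */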
		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err)
		goto err_out3;

	if (tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_full_unlock(tp);

			goto err_out2;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_full_lock(tp, 0);

	add_timer(&tp->timer);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

err_out3:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

err_out2:
	tg3_napi_disable(tp);
	tg3_free_consistent(tp);

err_out1:
	tg3_ints_fini(tp);
	return err;
}
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
						 struct rtnl_link_stats64 *);
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);

static int tg3_close(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	tg3_napi_disable(tp);
	cancel_work_sync(&tp->reset_task);

	netif_tx_stop_all_queues(dev);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_get_stats64(tp->dev, &tp->net_stats_prev);

	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	netif_carrier_off(tp->dev);

	return 0;
}
static inline u64 get_stat64(tg3_stat64_t *val)
{
	return ((u64)val->high << 32) | ((u64)val->low);
}
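
/* Example of the composition done by get_stat64(): a counter stored as
 * high = 0x00000001, low = 0x00000005 comes back as the single 64-bit value
 * 0x0000000100000005, i.e. (1ULL << 32) + 5.
 */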
static u64 calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}

#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
	get_stat64(&hw_stats->member)
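
/* ESTAT_ADD() folds two sources into each ethtool counter: old_estats holds
 * the totals snapshotted the last time the interface was brought down (see
 * tg3_close() copying into tp->estats_prev), and hw_stats holds what the NIC
 * has accumulated since then, so their sum is the lifetime value reported to
 * userspace.
 */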
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	stats->rx_dropped = tp->rx_dropped;

	return stats;
}
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg;
	u32 tmp;
	int j, k;

	reg = 0xffffffff;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;

			reg >>= 1;

			if (tmp)
				reg ^= 0xedb88320;
		}
	}

	return ~reg;
}

static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
	/* accept or reject all multicast frames */
	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
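
		/* How the hash filter above works: the four MAC_HASH_REG_x
		 * registers form one 128-bit bit map.  For each multicast
		 * address, calc_crc() produces an Ethernet-style CRC-32,
		 * seven low-order bits derived from it select one of the 128
		 * positions (regidx picks the 32-bit register, bit the
		 * position within it), and that bit is set so the MAC accepts
		 * any frame whose destination address hashes there.  False
		 * positives are possible; exact filtering is left to the
		 * networking stack.
		 */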
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
	}
}

static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}

static void tg3_get_regs(struct net_device *dev,
			 struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	memset(_p, 0, TG3_REG_BLK_SIZE);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}

static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count;
	__be32 val;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}
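
	/* Walking through the "offset=1 len=2" example from the comment
	 * above: b_offset = 1 and b_count = 3, which is more than the two
	 * bytes requested, so b_count is clipped to len = 2.  The driver
	 * then reads the aligned word at offset 0 and copies bytes 1 and 2
	 * out of it, after which nothing remains for the aligned loop below.
	 */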
	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}

	return 0;
}
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);

static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start, end;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	if (tg3_flag(tp, NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
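
/* The read-modify-write dance above exists because NVRAM writes are done in
 * aligned 4-byte words.  If the caller's range starts or ends off a word
 * boundary, the bordering words are first read back (into 'start' and 'end'),
 * a scratch buffer of the rounded-up length is built with the caller's bytes
 * dropped in at b_offset, and the whole aligned block is then written in a
 * single tg3_nvram_write_block() call.
 */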
9908 static int tg3_get_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
9910 struct tg3
*tp
= netdev_priv(dev
);
9912 if (tg3_flag(tp
, USE_PHYLIB
)) {
9913 struct phy_device
*phydev
;
9914 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
9916 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
9917 return phy_ethtool_gset(phydev
, cmd
);
9920 cmd
->supported
= (SUPPORTED_Autoneg
);
9922 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
))
9923 cmd
->supported
|= (SUPPORTED_1000baseT_Half
|
9924 SUPPORTED_1000baseT_Full
);
9926 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)) {
9927 cmd
->supported
|= (SUPPORTED_100baseT_Half
|
9928 SUPPORTED_100baseT_Full
|
9929 SUPPORTED_10baseT_Half
|
9930 SUPPORTED_10baseT_Full
|
9932 cmd
->port
= PORT_TP
;
9934 cmd
->supported
|= SUPPORTED_FIBRE
;
9935 cmd
->port
= PORT_FIBRE
;
9938 cmd
->advertising
= tp
->link_config
.advertising
;
9939 if (netif_running(dev
)) {
9940 ethtool_cmd_speed_set(cmd
, tp
->link_config
.active_speed
);
9941 cmd
->duplex
= tp
->link_config
.active_duplex
;
9943 ethtool_cmd_speed_set(cmd
, SPEED_INVALID
);
9944 cmd
->duplex
= DUPLEX_INVALID
;
9946 cmd
->phy_address
= tp
->phy_addr
;
9947 cmd
->transceiver
= XCVR_INTERNAL
;
9948 cmd
->autoneg
= tp
->link_config
.autoneg
;
9954 static int tg3_set_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
9956 struct tg3
*tp
= netdev_priv(dev
);
9957 u32 speed
= ethtool_cmd_speed(cmd
);
9959 if (tg3_flag(tp
, USE_PHYLIB
)) {
9960 struct phy_device
*phydev
;
9961 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
9963 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
9964 return phy_ethtool_sset(phydev
, cmd
);
9967 if (cmd
->autoneg
!= AUTONEG_ENABLE
&&
9968 cmd
->autoneg
!= AUTONEG_DISABLE
)
9971 if (cmd
->autoneg
== AUTONEG_DISABLE
&&
9972 cmd
->duplex
!= DUPLEX_FULL
&&
9973 cmd
->duplex
!= DUPLEX_HALF
)
9976 if (cmd
->autoneg
== AUTONEG_ENABLE
) {
9977 u32 mask
= ADVERTISED_Autoneg
|
9979 ADVERTISED_Asym_Pause
;
9981 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
))
9982 mask
|= ADVERTISED_1000baseT_Half
|
9983 ADVERTISED_1000baseT_Full
;
9985 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
))
9986 mask
|= ADVERTISED_100baseT_Half
|
9987 ADVERTISED_100baseT_Full
|
9988 ADVERTISED_10baseT_Half
|
9989 ADVERTISED_10baseT_Full
|
9992 mask
|= ADVERTISED_FIBRE
;
9994 if (cmd
->advertising
& ~mask
)
9997 mask
&= (ADVERTISED_1000baseT_Half
|
9998 ADVERTISED_1000baseT_Full
|
9999 ADVERTISED_100baseT_Half
|
10000 ADVERTISED_100baseT_Full
|
10001 ADVERTISED_10baseT_Half
|
10002 ADVERTISED_10baseT_Full
);
10004 cmd
->advertising
&= mask
;
10006 if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
) {
10007 if (speed
!= SPEED_1000
)
10010 if (cmd
->duplex
!= DUPLEX_FULL
)
10013 if (speed
!= SPEED_100
&&
10019 tg3_full_lock(tp
, 0);
10021 tp
->link_config
.autoneg
= cmd
->autoneg
;
10022 if (cmd
->autoneg
== AUTONEG_ENABLE
) {
10023 tp
->link_config
.advertising
= (cmd
->advertising
|
10024 ADVERTISED_Autoneg
);
10025 tp
->link_config
.speed
= SPEED_INVALID
;
10026 tp
->link_config
.duplex
= DUPLEX_INVALID
;
10028 tp
->link_config
.advertising
= 0;
10029 tp
->link_config
.speed
= speed
;
10030 tp
->link_config
.duplex
= cmd
->duplex
;
10033 tp
->link_config
.orig_speed
= tp
->link_config
.speed
;
10034 tp
->link_config
.orig_duplex
= tp
->link_config
.duplex
;
10035 tp
->link_config
.orig_autoneg
= tp
->link_config
.autoneg
;
10037 if (netif_running(dev
))
10038 tg3_setup_phy(tp
, 1);
10040 tg3_full_unlock(tp
);
10045 static void tg3_get_drvinfo(struct net_device
*dev
, struct ethtool_drvinfo
*info
)
10047 struct tg3
*tp
= netdev_priv(dev
);
10049 strcpy(info
->driver
, DRV_MODULE_NAME
);
10050 strcpy(info
->version
, DRV_MODULE_VERSION
);
10051 strcpy(info
->fw_version
, tp
->fw_ver
);
10052 strcpy(info
->bus_info
, pci_name(tp
->pdev
));
10055 static void tg3_get_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
10057 struct tg3
*tp
= netdev_priv(dev
);
10059 if (tg3_flag(tp
, WOL_CAP
) && device_can_wakeup(&tp
->pdev
->dev
))
10060 wol
->supported
= WAKE_MAGIC
;
10062 wol
->supported
= 0;
10064 if (tg3_flag(tp
, WOL_ENABLE
) && device_can_wakeup(&tp
->pdev
->dev
))
10065 wol
->wolopts
= WAKE_MAGIC
;
10066 memset(&wol
->sopass
, 0, sizeof(wol
->sopass
));
10069 static int tg3_set_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
10071 struct tg3
*tp
= netdev_priv(dev
);
10072 struct device
*dp
= &tp
->pdev
->dev
;
10074 if (wol
->wolopts
& ~WAKE_MAGIC
)
10076 if ((wol
->wolopts
& WAKE_MAGIC
) &&
10077 !(tg3_flag(tp
, WOL_CAP
) && device_can_wakeup(dp
)))
10080 device_set_wakeup_enable(dp
, wol
->wolopts
& WAKE_MAGIC
);
10082 spin_lock_bh(&tp
->lock
);
10083 if (device_may_wakeup(dp
))
10084 tg3_flag_set(tp
, WOL_ENABLE
);
10086 tg3_flag_clear(tp
, WOL_ENABLE
);
10087 spin_unlock_bh(&tp
->lock
);
10092 static u32
tg3_get_msglevel(struct net_device
*dev
)
10094 struct tg3
*tp
= netdev_priv(dev
);
10095 return tp
->msg_enable
;
10098 static void tg3_set_msglevel(struct net_device
*dev
, u32 value
)
10100 struct tg3
*tp
= netdev_priv(dev
);
10101 tp
->msg_enable
= value
;
10104 static int tg3_nway_reset(struct net_device
*dev
)
10106 struct tg3
*tp
= netdev_priv(dev
);
10109 if (!netif_running(dev
))
10112 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)
10115 if (tg3_flag(tp
, USE_PHYLIB
)) {
10116 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
10118 r
= phy_start_aneg(tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]);
10122 spin_lock_bh(&tp
->lock
);
10124 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
10125 if (!tg3_readphy(tp
, MII_BMCR
, &bmcr
) &&
10126 ((bmcr
& BMCR_ANENABLE
) ||
10127 (tp
->phy_flags
& TG3_PHYFLG_PARALLEL_DETECT
))) {
10128 tg3_writephy(tp
, MII_BMCR
, bmcr
| BMCR_ANRESTART
|
10132 spin_unlock_bh(&tp
->lock
);
10138 static void tg3_get_ringparam(struct net_device
*dev
, struct ethtool_ringparam
*ering
)
10140 struct tg3
*tp
= netdev_priv(dev
);
10142 ering
->rx_max_pending
= tp
->rx_std_ring_mask
;
10143 ering
->rx_mini_max_pending
= 0;
10144 if (tg3_flag(tp
, JUMBO_RING_ENABLE
))
10145 ering
->rx_jumbo_max_pending
= tp
->rx_jmb_ring_mask
;
10147 ering
->rx_jumbo_max_pending
= 0;
10149 ering
->tx_max_pending
= TG3_TX_RING_SIZE
- 1;
10151 ering
->rx_pending
= tp
->rx_pending
;
10152 ering
->rx_mini_pending
= 0;
10153 if (tg3_flag(tp
, JUMBO_RING_ENABLE
))
10154 ering
->rx_jumbo_pending
= tp
->rx_jumbo_pending
;
10156 ering
->rx_jumbo_pending
= 0;
10158 ering
->tx_pending
= tp
->napi
[0].tx_pending
;
10161 static int tg3_set_ringparam(struct net_device
*dev
, struct ethtool_ringparam
*ering
)
10163 struct tg3
*tp
= netdev_priv(dev
);
10164 int i
, irq_sync
= 0, err
= 0;
10166 if ((ering
->rx_pending
> tp
->rx_std_ring_mask
) ||
10167 (ering
->rx_jumbo_pending
> tp
->rx_jmb_ring_mask
) ||
10168 (ering
->tx_pending
> TG3_TX_RING_SIZE
- 1) ||
10169 (ering
->tx_pending
<= MAX_SKB_FRAGS
) ||
10170 (tg3_flag(tp
, TSO_BUG
) &&
10171 (ering
->tx_pending
<= (MAX_SKB_FRAGS
* 3))))
10174 if (netif_running(dev
)) {
10176 tg3_netif_stop(tp
);
10180 tg3_full_lock(tp
, irq_sync
);
10182 tp
->rx_pending
= ering
->rx_pending
;
10184 if (tg3_flag(tp
, MAX_RXPEND_64
) &&
10185 tp
->rx_pending
> 63)
10186 tp
->rx_pending
= 63;
10187 tp
->rx_jumbo_pending
= ering
->rx_jumbo_pending
;
10189 for (i
= 0; i
< tp
->irq_max
; i
++)
10190 tp
->napi
[i
].tx_pending
= ering
->tx_pending
;
10192 if (netif_running(dev
)) {
10193 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
10194 err
= tg3_restart_hw(tp
, 1);
10196 tg3_netif_start(tp
);
10199 tg3_full_unlock(tp
);
10201 if (irq_sync
&& !err
)
10207 static void tg3_get_pauseparam(struct net_device
*dev
, struct ethtool_pauseparam
*epause
)
10209 struct tg3
*tp
= netdev_priv(dev
);
10211 epause
->autoneg
= !!tg3_flag(tp
, PAUSE_AUTONEG
);
10213 if (tp
->link_config
.active_flowctrl
& FLOW_CTRL_RX
)
10214 epause
->rx_pause
= 1;
10216 epause
->rx_pause
= 0;
10218 if (tp
->link_config
.active_flowctrl
& FLOW_CTRL_TX
)
10219 epause
->tx_pause
= 1;
10221 epause
->tx_pause
= 0;
10224 static int tg3_set_pauseparam(struct net_device
*dev
, struct ethtool_pauseparam
*epause
)
10226 struct tg3
*tp
= netdev_priv(dev
);
10229 if (tg3_flag(tp
, USE_PHYLIB
)) {
10231 struct phy_device
*phydev
;
10233 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
10235 if (!(phydev
->supported
& SUPPORTED_Pause
) ||
10236 (!(phydev
->supported
& SUPPORTED_Asym_Pause
) &&
10237 (epause
->rx_pause
!= epause
->tx_pause
)))
10240 tp
->link_config
.flowctrl
= 0;
10241 if (epause
->rx_pause
) {
10242 tp
->link_config
.flowctrl
|= FLOW_CTRL_RX
;
10244 if (epause
->tx_pause
) {
10245 tp
->link_config
.flowctrl
|= FLOW_CTRL_TX
;
10246 newadv
= ADVERTISED_Pause
;
10248 newadv
= ADVERTISED_Pause
|
10249 ADVERTISED_Asym_Pause
;
10250 } else if (epause
->tx_pause
) {
10251 tp
->link_config
.flowctrl
|= FLOW_CTRL_TX
;
10252 newadv
= ADVERTISED_Asym_Pause
;
10256 if (epause
->autoneg
)
10257 tg3_flag_set(tp
, PAUSE_AUTONEG
);
10259 tg3_flag_clear(tp
, PAUSE_AUTONEG
);
10261 if (tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
) {
10262 u32 oldadv
= phydev
->advertising
&
10263 (ADVERTISED_Pause
| ADVERTISED_Asym_Pause
);
10264 if (oldadv
!= newadv
) {
10265 phydev
->advertising
&=
10266 ~(ADVERTISED_Pause
|
10267 ADVERTISED_Asym_Pause
);
10268 phydev
->advertising
|= newadv
;
10269 if (phydev
->autoneg
) {
10271 * Always renegotiate the link to
10272 * inform our link partner of our
10273 * flow control settings, even if the
10274 * flow control is forced. Let
10275 * tg3_adjust_link() do the final
10276 * flow control setup.
10278 return phy_start_aneg(phydev
);
10282 if (!epause
->autoneg
)
10283 tg3_setup_flow_control(tp
, 0, 0);
10285 tp
->link_config
.orig_advertising
&=
10286 ~(ADVERTISED_Pause
|
10287 ADVERTISED_Asym_Pause
);
10288 tp
->link_config
.orig_advertising
|= newadv
;
10293 if (netif_running(dev
)) {
10294 tg3_netif_stop(tp
);
10298 tg3_full_lock(tp
, irq_sync
);
10300 if (epause
->autoneg
)
10301 tg3_flag_set(tp
, PAUSE_AUTONEG
);
10303 tg3_flag_clear(tp
, PAUSE_AUTONEG
);
10304 if (epause
->rx_pause
)
10305 tp
->link_config
.flowctrl
|= FLOW_CTRL_RX
;
10307 tp
->link_config
.flowctrl
&= ~FLOW_CTRL_RX
;
10308 if (epause
->tx_pause
)
10309 tp
->link_config
.flowctrl
|= FLOW_CTRL_TX
;
10311 tp
->link_config
.flowctrl
&= ~FLOW_CTRL_TX
;
10313 if (netif_running(dev
)) {
10314 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
10315 err
= tg3_restart_hw(tp
, 1);
10317 tg3_netif_start(tp
);
10320 tg3_full_unlock(tp
);
10326 static int tg3_get_sset_count(struct net_device
*dev
, int sset
)
10330 return TG3_NUM_TEST
;
10332 return TG3_NUM_STATS
;
10334 return -EOPNOTSUPP
;
10338 static void tg3_get_strings(struct net_device
*dev
, u32 stringset
, u8
*buf
)
10340 switch (stringset
) {
10342 memcpy(buf
, ðtool_stats_keys
, sizeof(ethtool_stats_keys
));
10345 memcpy(buf
, ðtool_test_keys
, sizeof(ethtool_test_keys
));
10348 WARN_ON(1); /* we need a WARN() */
10353 static int tg3_set_phys_id(struct net_device
*dev
,
10354 enum ethtool_phys_id_state state
)
10356 struct tg3
*tp
= netdev_priv(dev
);
10358 if (!netif_running(tp
->dev
))
10362 case ETHTOOL_ID_ACTIVE
:
10363 return 1; /* cycle on/off once per second */
10365 case ETHTOOL_ID_ON
:
10366 tw32(MAC_LED_CTRL
, LED_CTRL_LNKLED_OVERRIDE
|
10367 LED_CTRL_1000MBPS_ON
|
10368 LED_CTRL_100MBPS_ON
|
10369 LED_CTRL_10MBPS_ON
|
10370 LED_CTRL_TRAFFIC_OVERRIDE
|
10371 LED_CTRL_TRAFFIC_BLINK
|
10372 LED_CTRL_TRAFFIC_LED
);
10375 case ETHTOOL_ID_OFF
:
10376 tw32(MAC_LED_CTRL
, LED_CTRL_LNKLED_OVERRIDE
|
10377 LED_CTRL_TRAFFIC_OVERRIDE
);
10380 case ETHTOOL_ID_INACTIVE
:
10381 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
10388 static void tg3_get_ethtool_stats(struct net_device
*dev
,
10389 struct ethtool_stats
*estats
, u64
*tmp_stats
)
10391 struct tg3
*tp
= netdev_priv(dev
);
10392 memcpy(tmp_stats
, tg3_get_estats(tp
), sizeof(tp
->estats
));
10395 static __be32
* tg3_vpd_readblock(struct tg3
*tp
)
10399 u32 offset
= 0, len
= 0;
10402 if (tg3_flag(tp
, NO_NVRAM
) || tg3_nvram_read(tp
, 0, &magic
))
10405 if (magic
== TG3_EEPROM_MAGIC
) {
10406 for (offset
= TG3_NVM_DIR_START
;
10407 offset
< TG3_NVM_DIR_END
;
10408 offset
+= TG3_NVM_DIRENT_SIZE
) {
10409 if (tg3_nvram_read(tp
, offset
, &val
))
10412 if ((val
>> TG3_NVM_DIRTYPE_SHIFT
) ==
10413 TG3_NVM_DIRTYPE_EXTVPD
)
10417 if (offset
!= TG3_NVM_DIR_END
) {
10418 len
= (val
& TG3_NVM_DIRTYPE_LENMSK
) * 4;
10419 if (tg3_nvram_read(tp
, offset
+ 4, &offset
))
10422 offset
= tg3_nvram_logical_addr(tp
, offset
);
10426 if (!offset
|| !len
) {
10427 offset
= TG3_NVM_VPD_OFF
;
10428 len
= TG3_NVM_VPD_LEN
;
10431 buf
= kmalloc(len
, GFP_KERNEL
);
10435 if (magic
== TG3_EEPROM_MAGIC
) {
10436 for (i
= 0; i
< len
; i
+= 4) {
10437 /* The data is in little-endian format in NVRAM.
10438 * Use the big-endian read routines to preserve
10439 * the byte order as it exists in NVRAM.
10441 if (tg3_nvram_read_be32(tp
, offset
+ i
, &buf
[i
/4]))
10447 unsigned int pos
= 0;
10449 ptr
= (u8
*)&buf
[0];
10450 for (i
= 0; pos
< len
&& i
< 3; i
++, pos
+= cnt
, ptr
+= cnt
) {
10451 cnt
= pci_read_vpd(tp
->pdev
, pos
,
10453 if (cnt
== -ETIMEDOUT
|| cnt
== -EINTR
)
10469 #define NVRAM_TEST_SIZE 0x100
10470 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10471 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10472 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10473 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10474 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10476 static int tg3_test_nvram(struct tg3
*tp
)
10480 int i
, j
, k
, err
= 0, size
;
10482 if (tg3_flag(tp
, NO_NVRAM
))
10485 if (tg3_nvram_read(tp
, 0, &magic
) != 0)
10488 if (magic
== TG3_EEPROM_MAGIC
)
10489 size
= NVRAM_TEST_SIZE
;
10490 else if ((magic
& TG3_EEPROM_MAGIC_FW_MSK
) == TG3_EEPROM_MAGIC_FW
) {
10491 if ((magic
& TG3_EEPROM_SB_FORMAT_MASK
) ==
10492 TG3_EEPROM_SB_FORMAT_1
) {
10493 switch (magic
& TG3_EEPROM_SB_REVISION_MASK
) {
10494 case TG3_EEPROM_SB_REVISION_0
:
10495 size
= NVRAM_SELFBOOT_FORMAT1_0_SIZE
;
10497 case TG3_EEPROM_SB_REVISION_2
:
10498 size
= NVRAM_SELFBOOT_FORMAT1_2_SIZE
;
10500 case TG3_EEPROM_SB_REVISION_3
:
10501 size
= NVRAM_SELFBOOT_FORMAT1_3_SIZE
;
10508 } else if ((magic
& TG3_EEPROM_MAGIC_HW_MSK
) == TG3_EEPROM_MAGIC_HW
)
10509 size
= NVRAM_SELFBOOT_HW_SIZE
;
10513 buf
= kmalloc(size
, GFP_KERNEL
);
10518 for (i
= 0, j
= 0; i
< size
; i
+= 4, j
++) {
10519 err
= tg3_nvram_read_be32(tp
, i
, &buf
[j
]);
10526 /* Selfboot format */
10527 magic
= be32_to_cpu(buf
[0]);
10528 if ((magic
& TG3_EEPROM_MAGIC_FW_MSK
) ==
10529 TG3_EEPROM_MAGIC_FW
) {
10530 u8
*buf8
= (u8
*) buf
, csum8
= 0;
10532 if ((magic
& TG3_EEPROM_SB_REVISION_MASK
) ==
10533 TG3_EEPROM_SB_REVISION_2
) {
10534 /* For rev 2, the csum doesn't include the MBA. */
10535 for (i
= 0; i
< TG3_EEPROM_SB_F1R2_MBA_OFF
; i
++)
10537 for (i
= TG3_EEPROM_SB_F1R2_MBA_OFF
+ 4; i
< size
; i
++)
10540 for (i
= 0; i
< size
; i
++)
10553 if ((magic
& TG3_EEPROM_MAGIC_HW_MSK
) ==
10554 TG3_EEPROM_MAGIC_HW
) {
10555 u8 data
[NVRAM_SELFBOOT_DATA_SIZE
];
10556 u8 parity
[NVRAM_SELFBOOT_DATA_SIZE
];
10557 u8
*buf8
= (u8
*) buf
;
10559 /* Separate the parity bits and the data bytes. */
10560 for (i
= 0, j
= 0, k
= 0; i
< NVRAM_SELFBOOT_HW_SIZE
; i
++) {
10561 if ((i
== 0) || (i
== 8)) {
10565 for (l
= 0, msk
= 0x80; l
< 7; l
++, msk
>>= 1)
10566 parity
[k
++] = buf8
[i
] & msk
;
10568 } else if (i
== 16) {
10572 for (l
= 0, msk
= 0x20; l
< 6; l
++, msk
>>= 1)
10573 parity
[k
++] = buf8
[i
] & msk
;
10576 for (l
= 0, msk
= 0x80; l
< 8; l
++, msk
>>= 1)
10577 parity
[k
++] = buf8
[i
] & msk
;
10580 data
[j
++] = buf8
[i
];
10584 for (i
= 0; i
< NVRAM_SELFBOOT_DATA_SIZE
; i
++) {
10585 u8 hw8
= hweight8(data
[i
]);
10587 if ((hw8
& 0x1) && parity
[i
])
10589 else if (!(hw8
& 0x1) && !parity
[i
])
10598 /* Bootstrap checksum at offset 0x10 */
10599 csum
= calc_crc((unsigned char *) buf
, 0x10);
10600 if (csum
!= le32_to_cpu(buf
[0x10/4]))
10603 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10604 csum
= calc_crc((unsigned char *) &buf
[0x74/4], 0x88);
10605 if (csum
!= le32_to_cpu(buf
[0xfc/4]))
10610 buf
= tg3_vpd_readblock(tp
);
10614 i
= pci_vpd_find_tag((u8
*)buf
, 0, TG3_NVM_VPD_LEN
,
10615 PCI_VPD_LRDT_RO_DATA
);
10617 j
= pci_vpd_lrdt_size(&((u8
*)buf
)[i
]);
10621 if (i
+ PCI_VPD_LRDT_TAG_SIZE
+ j
> TG3_NVM_VPD_LEN
)
10624 i
+= PCI_VPD_LRDT_TAG_SIZE
;
10625 j
= pci_vpd_find_info_keyword((u8
*)buf
, i
, j
,
10626 PCI_VPD_RO_KEYWORD_CHKSUM
);
10630 j
+= PCI_VPD_INFO_FLD_HDR_SIZE
;
10632 for (i
= 0; i
<= j
; i
++)
10633 csum8
+= ((u8
*)buf
)[i
];
10647 #define TG3_SERDES_TIMEOUT_SEC 2
10648 #define TG3_COPPER_TIMEOUT_SEC 6
10650 static int tg3_test_link(struct tg3
*tp
)
10654 if (!netif_running(tp
->dev
))
10657 if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)
10658 max
= TG3_SERDES_TIMEOUT_SEC
;
10660 max
= TG3_COPPER_TIMEOUT_SEC
;
10662 for (i
= 0; i
< max
; i
++) {
10663 if (netif_carrier_ok(tp
->dev
))
10666 if (msleep_interruptible(1000))
10673 /* Only test the commonly used registers */
10674 static int tg3_test_registers(struct tg3
*tp
)
10676 int i
, is_5705
, is_5750
;
10677 u32 offset
, read_mask
, write_mask
, val
, save_val
, read_val
;
10681 #define TG3_FL_5705 0x1
10682 #define TG3_FL_NOT_5705 0x2
10683 #define TG3_FL_NOT_5788 0x4
10684 #define TG3_FL_NOT_5750 0x8
10688 /* MAC Control Registers */
10689 { MAC_MODE
, TG3_FL_NOT_5705
,
10690 0x00000000, 0x00ef6f8c },
10691 { MAC_MODE
, TG3_FL_5705
,
10692 0x00000000, 0x01ef6b8c },
10693 { MAC_STATUS
, TG3_FL_NOT_5705
,
10694 0x03800107, 0x00000000 },
10695 { MAC_STATUS
, TG3_FL_5705
,
10696 0x03800100, 0x00000000 },
10697 { MAC_ADDR_0_HIGH
, 0x0000,
10698 0x00000000, 0x0000ffff },
10699 { MAC_ADDR_0_LOW
, 0x0000,
10700 0x00000000, 0xffffffff },
10701 { MAC_RX_MTU_SIZE
, 0x0000,
10702 0x00000000, 0x0000ffff },
10703 { MAC_TX_MODE
, 0x0000,
10704 0x00000000, 0x00000070 },
10705 { MAC_TX_LENGTHS
, 0x0000,
10706 0x00000000, 0x00003fff },
10707 { MAC_RX_MODE
, TG3_FL_NOT_5705
,
10708 0x00000000, 0x000007fc },
10709 { MAC_RX_MODE
, TG3_FL_5705
,
10710 0x00000000, 0x000007dc },
10711 { MAC_HASH_REG_0
, 0x0000,
10712 0x00000000, 0xffffffff },
10713 { MAC_HASH_REG_1
, 0x0000,
10714 0x00000000, 0xffffffff },
10715 { MAC_HASH_REG_2
, 0x0000,
10716 0x00000000, 0xffffffff },
10717 { MAC_HASH_REG_3
, 0x0000,
10718 0x00000000, 0xffffffff },
10720 /* Receive Data and Receive BD Initiator Control Registers. */
10721 { RCVDBDI_JUMBO_BD
+0, TG3_FL_NOT_5705
,
10722 0x00000000, 0xffffffff },
10723 { RCVDBDI_JUMBO_BD
+4, TG3_FL_NOT_5705
,
10724 0x00000000, 0xffffffff },
10725 { RCVDBDI_JUMBO_BD
+8, TG3_FL_NOT_5705
,
10726 0x00000000, 0x00000003 },
10727 { RCVDBDI_JUMBO_BD
+0xc, TG3_FL_NOT_5705
,
10728 0x00000000, 0xffffffff },
10729 { RCVDBDI_STD_BD
+0, 0x0000,
10730 0x00000000, 0xffffffff },
10731 { RCVDBDI_STD_BD
+4, 0x0000,
10732 0x00000000, 0xffffffff },
10733 { RCVDBDI_STD_BD
+8, 0x0000,
10734 0x00000000, 0xffff0002 },
10735 { RCVDBDI_STD_BD
+0xc, 0x0000,
10736 0x00000000, 0xffffffff },
10738 /* Receive BD Initiator Control Registers. */
10739 { RCVBDI_STD_THRESH
, TG3_FL_NOT_5705
,
10740 0x00000000, 0xffffffff },
10741 { RCVBDI_STD_THRESH
, TG3_FL_5705
,
10742 0x00000000, 0x000003ff },
10743 { RCVBDI_JUMBO_THRESH
, TG3_FL_NOT_5705
,
10744 0x00000000, 0xffffffff },
10746 /* Host Coalescing Control Registers. */
10747 { HOSTCC_MODE
, TG3_FL_NOT_5705
,
10748 0x00000000, 0x00000004 },
10749 { HOSTCC_MODE
, TG3_FL_5705
,
10750 0x00000000, 0x000000f6 },
10751 { HOSTCC_RXCOL_TICKS
, TG3_FL_NOT_5705
,
10752 0x00000000, 0xffffffff },
10753 { HOSTCC_RXCOL_TICKS
, TG3_FL_5705
,
10754 0x00000000, 0x000003ff },
10755 { HOSTCC_TXCOL_TICKS
, TG3_FL_NOT_5705
,
10756 0x00000000, 0xffffffff },
10757 { HOSTCC_TXCOL_TICKS
, TG3_FL_5705
,
10758 0x00000000, 0x000003ff },
10759 { HOSTCC_RXMAX_FRAMES
, TG3_FL_NOT_5705
,
10760 0x00000000, 0xffffffff },
10761 { HOSTCC_RXMAX_FRAMES
, TG3_FL_5705
| TG3_FL_NOT_5788
,
10762 0x00000000, 0x000000ff },
10763 { HOSTCC_TXMAX_FRAMES
, TG3_FL_NOT_5705
,
10764 0x00000000, 0xffffffff },
10765 { HOSTCC_TXMAX_FRAMES
, TG3_FL_5705
| TG3_FL_NOT_5788
,
10766 0x00000000, 0x000000ff },
10767 { HOSTCC_RXCOAL_TICK_INT
, TG3_FL_NOT_5705
,
10768 0x00000000, 0xffffffff },
10769 { HOSTCC_TXCOAL_TICK_INT
, TG3_FL_NOT_5705
,
10770 0x00000000, 0xffffffff },
10771 { HOSTCC_RXCOAL_MAXF_INT
, TG3_FL_NOT_5705
,
10772 0x00000000, 0xffffffff },
10773 { HOSTCC_RXCOAL_MAXF_INT
, TG3_FL_5705
| TG3_FL_NOT_5788
,
10774 0x00000000, 0x000000ff },
10775 { HOSTCC_TXCOAL_MAXF_INT
, TG3_FL_NOT_5705
,
10776 0x00000000, 0xffffffff },
10777 { HOSTCC_TXCOAL_MAXF_INT
, TG3_FL_5705
| TG3_FL_NOT_5788
,
10778 0x00000000, 0x000000ff },
10779 { HOSTCC_STAT_COAL_TICKS
, TG3_FL_NOT_5705
,
10780 0x00000000, 0xffffffff },
10781 { HOSTCC_STATS_BLK_HOST_ADDR
, TG3_FL_NOT_5705
,
10782 0x00000000, 0xffffffff },
10783 { HOSTCC_STATS_BLK_HOST_ADDR
+4, TG3_FL_NOT_5705
,
10784 0x00000000, 0xffffffff },
10785 { HOSTCC_STATUS_BLK_HOST_ADDR
, 0x0000,
10786 0x00000000, 0xffffffff },
10787 { HOSTCC_STATUS_BLK_HOST_ADDR
+4, 0x0000,
10788 0x00000000, 0xffffffff },
10789 { HOSTCC_STATS_BLK_NIC_ADDR
, 0x0000,
10790 0xffffffff, 0x00000000 },
10791 { HOSTCC_STATUS_BLK_NIC_ADDR
, 0x0000,
10792 0xffffffff, 0x00000000 },
10794 /* Buffer Manager Control Registers. */
10795 { BUFMGR_MB_POOL_ADDR
, TG3_FL_NOT_5750
,
10796 0x00000000, 0x007fff80 },
10797 { BUFMGR_MB_POOL_SIZE
, TG3_FL_NOT_5750
,
10798 0x00000000, 0x007fffff },
10799 { BUFMGR_MB_RDMA_LOW_WATER
, 0x0000,
10800 0x00000000, 0x0000003f },
10801 { BUFMGR_MB_MACRX_LOW_WATER
, 0x0000,
10802 0x00000000, 0x000001ff },
10803 { BUFMGR_MB_HIGH_WATER
, 0x0000,
10804 0x00000000, 0x000001ff },
10805 { BUFMGR_DMA_DESC_POOL_ADDR
, TG3_FL_NOT_5705
,
10806 0xffffffff, 0x00000000 },
10807 { BUFMGR_DMA_DESC_POOL_SIZE
, TG3_FL_NOT_5705
,
10808 0xffffffff, 0x00000000 },
10810 /* Mailbox Registers */
10811 { GRCMBOX_RCVSTD_PROD_IDX
+4, 0x0000,
10812 0x00000000, 0x000001ff },
10813 { GRCMBOX_RCVJUMBO_PROD_IDX
+4, TG3_FL_NOT_5705
,
10814 0x00000000, 0x000001ff },
10815 { GRCMBOX_RCVRET_CON_IDX_0
+4, 0x0000,
10816 0x00000000, 0x000007ff },
10817 { GRCMBOX_SNDHOST_PROD_IDX_0
+4, 0x0000,
10818 0x00000000, 0x000001ff },
10820 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10823 is_5705
= is_5750
= 0;
10824 if (tg3_flag(tp
, 5705_PLUS
)) {
10826 if (tg3_flag(tp
, 5750_PLUS
))
10830 for (i
= 0; reg_tbl
[i
].offset
!= 0xffff; i
++) {
10831 if (is_5705
&& (reg_tbl
[i
].flags
& TG3_FL_NOT_5705
))
10834 if (!is_5705
&& (reg_tbl
[i
].flags
& TG3_FL_5705
))
10837 if (tg3_flag(tp
, IS_5788
) &&
10838 (reg_tbl
[i
].flags
& TG3_FL_NOT_5788
))
10841 if (is_5750
&& (reg_tbl
[i
].flags
& TG3_FL_NOT_5750
))
10844 offset
= (u32
) reg_tbl
[i
].offset
;
10845 read_mask
= reg_tbl
[i
].read_mask
;
10846 write_mask
= reg_tbl
[i
].write_mask
;
10848 /* Save the original register content */
10849 save_val
= tr32(offset
);
10851 /* Determine the read-only value. */
10852 read_val
= save_val
& read_mask
;
10854 /* Write zero to the register, then make sure the read-only bits
10855 * are not changed and the read/write bits are all zeros.
10859 val
= tr32(offset
);
10861 /* Test the read-only and read/write bits. */
10862 if (((val
& read_mask
) != read_val
) || (val
& write_mask
))
10865 /* Write ones to all the bits defined by RdMask and WrMask, then
10866 * make sure the read-only bits are not changed and the
10867 * read/write bits are all ones.
10869 tw32(offset
, read_mask
| write_mask
);
10871 val
= tr32(offset
);
10873 /* Test the read-only bits. */
10874 if ((val
& read_mask
) != read_val
)
10877 /* Test the read/write bits. */
10878 if ((val
& write_mask
) != write_mask
)
10881 tw32(offset
, save_val
);
10887 if (netif_msg_hw(tp
))
10888 netdev_err(tp
->dev
,
10889 "Register test failed at offset %x\n", offset
);
10890 tw32(offset
, save_val
);
static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
	int i;
	u32 j;

	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
		for (j = 0; j < len; j += 4) {
			u32 val;

			tg3_write_mem(tp, offset + j, test_pattern[i]);
			tg3_read_mem(tp, offset + j, &val);
			if (val != test_pattern[i])
				return -EIO;
		}
	}

	return 0;
}
*tp
)
10915 static struct mem_entry
{
10918 } mem_tbl_570x
[] = {
10919 { 0x00000000, 0x00b50},
10920 { 0x00002000, 0x1c000},
10921 { 0xffffffff, 0x00000}
10922 }, mem_tbl_5705
[] = {
10923 { 0x00000100, 0x0000c},
10924 { 0x00000200, 0x00008},
10925 { 0x00004000, 0x00800},
10926 { 0x00006000, 0x01000},
10927 { 0x00008000, 0x02000},
10928 { 0x00010000, 0x0e000},
10929 { 0xffffffff, 0x00000}
10930 }, mem_tbl_5755
[] = {
10931 { 0x00000200, 0x00008},
10932 { 0x00004000, 0x00800},
10933 { 0x00006000, 0x00800},
10934 { 0x00008000, 0x02000},
10935 { 0x00010000, 0x0c000},
10936 { 0xffffffff, 0x00000}
10937 }, mem_tbl_5906
[] = {
10938 { 0x00000200, 0x00008},
10939 { 0x00004000, 0x00400},
10940 { 0x00006000, 0x00400},
10941 { 0x00008000, 0x01000},
10942 { 0x00010000, 0x01000},
10943 { 0xffffffff, 0x00000}
10944 }, mem_tbl_5717
[] = {
10945 { 0x00000200, 0x00008},
10946 { 0x00010000, 0x0a000},
10947 { 0x00020000, 0x13c00},
10948 { 0xffffffff, 0x00000}
10949 }, mem_tbl_57765
[] = {
10950 { 0x00000200, 0x00008},
10951 { 0x00004000, 0x00800},
10952 { 0x00006000, 0x09800},
10953 { 0x00010000, 0x0a000},
10954 { 0xffffffff, 0x00000}
10956 struct mem_entry
*mem_tbl
;
10960 if (tg3_flag(tp
, 5717_PLUS
))
10961 mem_tbl
= mem_tbl_5717
;
10962 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57765
)
10963 mem_tbl
= mem_tbl_57765
;
10964 else if (tg3_flag(tp
, 5755_PLUS
))
10965 mem_tbl
= mem_tbl_5755
;
10966 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
)
10967 mem_tbl
= mem_tbl_5906
;
10968 else if (tg3_flag(tp
, 5705_PLUS
))
10969 mem_tbl
= mem_tbl_5705
;
10971 mem_tbl
= mem_tbl_570x
;
10973 for (i
= 0; mem_tbl
[i
].offset
!= 0xffffffff; i
++) {
10974 err
= tg3_do_mem_test(tp
, mem_tbl
[i
].offset
, mem_tbl
[i
].len
);
#define TG3_MAC_LOOPBACK	0
#define TG3_PHY_LOOPBACK	1
#define TG3_TSO_LOOPBACK	2

#define TG3_TSO_MSS		500

#define TG3_TSO_IP_HDR_LEN	20
#define TG3_TSO_TCP_HDR_LEN	20
#define TG3_TSO_TCP_OPT_LEN	12

static const u8 tg3_tso_header[] = {
0x45, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x00,
0x40, 0x06, 0x00, 0x00,
0x0a, 0x00, 0x00, 0x01,
0x0a, 0x00, 0x00, 0x02,
0x0d, 0x00, 0xe0, 0x00,
0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x02, 0x00,
0x80, 0x10, 0x10, 0x00,
0x14, 0x09, 0x00, 0x00,
0x01, 0x01, 0x08, 0x0a,
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};
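
/* What the byte array above encodes, for reference: a canned IPv4 header
 * (version/IHL byte 0x45, protocol 0x06 = TCP, source 10.0.0.1, destination
 * 10.0.0.2) followed by a TCP header carrying a 12-byte options block -
 * matching TG3_TSO_IP_HDR_LEN, TG3_TSO_TCP_HDR_LEN and TG3_TSO_TCP_OPT_LEN
 * above.  The TSO loopback test pastes this template into its test frame and
 * lets the hardware segment it with an MSS of TG3_TSO_MSS bytes.
 */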
11009 static int tg3_run_loopback(struct tg3
*tp
, u32 pktsz
, int loopback_mode
)
11011 u32 mac_mode
, rx_start_idx
, rx_idx
, tx_idx
, opaque_key
;
11012 u32 base_flags
= 0, mss
= 0, desc_idx
, coal_now
, data_off
, val
;
11013 struct sk_buff
*skb
, *rx_skb
;
11016 int num_pkts
, tx_len
, rx_len
, i
, err
;
11017 struct tg3_rx_buffer_desc
*desc
;
11018 struct tg3_napi
*tnapi
, *rnapi
;
11019 struct tg3_rx_prodring_set
*tpr
= &tp
->napi
[0].prodring
;
11021 tnapi
= &tp
->napi
[0];
11022 rnapi
= &tp
->napi
[0];
11023 if (tp
->irq_cnt
> 1) {
11024 if (tg3_flag(tp
, ENABLE_RSS
))
11025 rnapi
= &tp
->napi
[1];
11026 if (tg3_flag(tp
, ENABLE_TSS
))
11027 tnapi
= &tp
->napi
[1];
11029 coal_now
= tnapi
->coal_now
| rnapi
->coal_now
;
	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.  Also, the MAC loopback test is deprecated for
		 * all newer ASIC revisions.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
		    tg3_flag(tp, CPMU_PRESENT))
			return 0;

		mac_mode = tp->mac_mode &
			   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
		mac_mode |= MAC_MODE_PORT_INT_LPBACK;
		if (!tg3_flag(tp, 5705_PLUS))
			mac_mode |= MAC_MODE_LINK_POLARITY;
		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else {
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			tg3_phy_fet_toggle_apd(tp, false);
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
		} else
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

		tg3_phy_toggle_automdix(tp, 0);

		tg3_writephy(tp, MII_BMCR, val);
		udelay(40);

		mac_mode = tp->mac_mode &
			   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			tg3_writephy(tp, MII_TG3_FET_PTEST,
				     MII_TG3_FET_PTEST_FRC_TX_LINK |
				     MII_TG3_FET_PTEST_FRC_TX_LOCK);
			/* The write needs to be flushed for the AC131 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
				tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		} else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
			u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
			if (masked_phy_id == TG3_PHY_ID_BCM5401)
				mac_mode &= ~MAC_MODE_LINK_POLARITY;
			else if (masked_phy_id == TG3_PHY_ID_BCM5411)
				mac_mode |= MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}
	}

	err = -EIO;

	tx_len = pktsz;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

	if (loopback_mode == TG3_TSO_LOOPBACK) {
		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
			      TG3_TSO_TCP_OPT_LEN;

		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
		       sizeof(tg3_tso_header));

		mss = TG3_TSO_MSS;

		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
		/* Set the total length field in the IP header */
		iph->tot_len = htons((u16)(mss + hdr_len));

		base_flags = (TXD_FLAG_CPU_PRE_DMA |
			      TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			struct tcphdr *th;
			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
			th = (struct tcphdr *)&tx_data[val];
			th->check = 0;
		} else
			base_flags |= TXD_FLAG_TCPUDP_CSUM;

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
		} else {
			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
		}

		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
	} else {
		num_pkts = 1;
		data_off = ETH_HLEN;
	}

	for (i = data_off; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);
	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       rnapi->coal_now);

	udelay(10);

	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

	tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len,
		    base_flags, (mss << 1) | 1);

	tnapi->tx_prod++;

	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
	tr32_mailbox(tnapi->prodmbox);

	udelay(10);

	/* 350 usec to allow enough time on some 10/100 Mbps devices. */
	for (i = 0; i < 35; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       coal_now);

		udelay(10);

		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
		rx_idx = rnapi->hw_status->idx[0].rx_producer;
		if ((tx_idx == tnapi->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	if (tx_idx != tnapi->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	val = data_off;
	while (rx_idx != rx_start_idx) {
		desc = &rnapi->rx_rcb[rx_start_idx++];
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
			goto out;

		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
			 - ETH_FCS_LEN;

		if (loopback_mode != TG3_TSO_LOOPBACK) {
			if (rx_len != tx_len)
				goto out;

			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
				if (opaque_key != RXD_OPAQUE_RING_STD)
					goto out;
			} else {
				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
					goto out;
			}
		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
			goto out;
		}

		if (opaque_key == RXD_OPAQUE_RING_STD) {
			rx_skb = tpr->rx_std_buffers[desc_idx].skb;
			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
					     mapping);
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
					     mapping);
		} else
			goto out;

		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
					    PCI_DMA_FROMDEVICE);

		for (i = data_off; i < rx_len; i++, val++) {
			if (*(rx_skb->data + i) != (u8) (val & 0xff))
				goto out;
		}
	}

	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
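/*
 * Illustrative sketch (compiled out, not used by the driver): the loopback
 * payload check above relies on a simple position-derived byte pattern.
 * The transmit side fills every payload byte with (index & 0xff); the
 * receive side walks the returned buffer with a running counter so the
 * same pattern can be checked across several TSO segments.  The helper
 * name is hypothetical.
 */
#if 0
static bool tg3_example_pattern_ok(const u8 *data, u32 start, u32 len)
{
	u32 i, val = start;

	for (i = start; i < len; i++, val++) {
		if (data[i] != (u8)(val & 0xff))
			return false;
	}

	return true;
}
#endif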
#define TG3_STD_LOOPBACK_FAILED		1
#define TG3_JMB_LOOPBACK_FAILED		2
#define TG3_TSO_LOOPBACK_FAILED		4

#define TG3_MAC_LOOPBACK_SHIFT		0
#define TG3_PHY_LOOPBACK_SHIFT		4
#define TG3_LOOPBACK_FAILED		0x00000077
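/*
 * Illustrative sketch (compiled out, not used by the driver): the loopback
 * result is a small bitmask.  Each loopback mode owns a nibble selected by
 * its TG3_*_LOOPBACK_SHIFT, and the *_FAILED bits mark the standard, jumbo
 * and TSO variants within that nibble.  The helper name is hypothetical.
 */
#if 0
static bool tg3_example_phy_tso_failed(int loopback_result)
{
	return loopback_result &
	       (TG3_TSO_LOOPBACK_FAILED << TG3_PHY_LOOPBACK_SHIFT);
}
#endif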
static int tg3_test_loopback(struct tg3 *tp)
{
	int err = 0;
	u32 eee_cap, cpmuctrl = 0;

	if (!netif_running(tp->dev))
		return TG3_LOOPBACK_FAILED;

	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	err = tg3_reset_hw(tp, 1);
	if (err) {
		err = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* Turn off gphy autopowerdown. */
	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, false);

	if (tg3_flag(tp, CPMU_PRESENT)) {
		int i;
		u32 status;

		tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);

		/* Wait for up to 40 microseconds to acquire lock. */
		for (i = 0; i < 4; i++) {
			status = tr32(TG3_CPMU_MUTEX_GNT);
			if (status == CPMU_MUTEX_GNT_DRIVER)
				break;
			udelay(10);
		}

		if (status != CPMU_MUTEX_GNT_DRIVER) {
			err = TG3_LOOPBACK_FAILED;
			goto done;
		}

		/* Turn off link-based power management. */
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		tw32(TG3_CPMU_CTRL,
		     cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
				  CPMU_CTRL_LINK_AWARE_MODE));
	}

	if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
		err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;

	if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
	    tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
		err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;

	if (tg3_flag(tp, CPMU_PRESENT)) {
		tw32(TG3_CPMU_CTRL, cpmuctrl);

		/* Release the mutex */
		tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
	}

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
			err |= TG3_STD_LOOPBACK_FAILED <<
			       TG3_PHY_LOOPBACK_SHIFT;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
			err |= TG3_TSO_LOOPBACK_FAILED <<
			       TG3_PHY_LOOPBACK_SHIFT;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
			err |= TG3_JMB_LOOPBACK_FAILED <<
			       TG3_PHY_LOOPBACK_SHIFT;
	}

	/* Re-enable gphy autopowerdown. */
	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);

done:
	tp->phy_flags |= eee_cap;

	return err;
}
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_up(tp);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down(tp);

}
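/*
 * Illustrative sketch (compiled out, not used by the driver): ethtool
 * reports one u64 result slot per test, in the order the tests are run
 * above (nvram, link, registers, memory, loopback, interrupt).  A non-zero
 * slot means that test failed; the loopback slot additionally carries the
 * per-mode failure bitmask returned by tg3_test_loopback().  The helper
 * name is hypothetical and the slot layout is inferred from the function
 * above.
 */
#if 0
static bool tg3_example_selftest_passed(const struct ethtool_test *etest,
					const u64 *data)
{
	int i;

	if (etest->flags & ETH_TEST_FL_FAILED)
		return false;

	for (i = 0; i < TG3_NUM_TEST; i++)
		if (data[i])
			return false;

	return true;
}
#endif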
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	default:
		break;
	}

	return -EOPNOTSUPP;
}
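/*
 * Illustrative userspace-side sketch (compiled out, not driver code): the
 * SIOCGMIIPHY/SIOCGMIIREG path handled above is typically exercised from
 * userspace roughly as follows.  The interface name, error handling and
 * helper name are assumptions made for the example.
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

static int example_read_phy_reg(const char *ifname, int reg, unsigned int *val)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	/* SIOCGMIIPHY fills in the PHY address the driver uses. */
	if (ioctl(fd, SIOCGMIIPHY, &ifr) < 0 ||
	    (mii->reg_num = reg, ioctl(fd, SIOCGMIIREG, &ifr)) < 0) {
		close(fd);
		return -1;
	}

	*val = mii->val_out;
	close(fd);
	return 0;
}
#endif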
static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);

	memcpy(ec, &tp->coal, sizeof(*ec));
	return 0;
}

static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

	if (!tg3_flag(tp, 5705_PLUS)) {
		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	}

	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;

	/* No rx interrupts will be generated if both are zero */
	if ((ec->rx_coalesce_usecs == 0) &&
	    (ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	/* No tx interrupts will be generated if both are zero */
	if ((ec->tx_coalesce_usecs == 0) &&
	    (ec->tx_max_coalesced_frames == 0))
		return -EINVAL;

	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}

	return 0;
}
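/*
 * Illustrative userspace-side sketch (compiled out, not driver code): the
 * get/set coalesce handlers above are reached through the
 * ETHTOOL_GCOALESCE/ETHTOOL_SCOALESCE ioctls.  A typical read-modify-write
 * from userspace looks roughly like this; the interface name and helper
 * name are assumptions made for the example.
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int example_set_rx_usecs(const char *ifname, __u32 usecs)
{
	struct ethtool_coalesce ec;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&ec;

	memset(&ec, 0, sizeof(ec));
	ec.cmd = ETHTOOL_GCOALESCE;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		close(fd);
		return -1;
	}

	ec.rx_coalesce_usecs = usecs;
	ec.cmd = ETHTOOL_SCOALESCE;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		close(fd);
		return -1;
	}

	close(fd);
	return 0;
}
#endif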
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
};
static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return;

	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}
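/*
 * Illustrative sketch (compiled out, not used by the driver): the sizing
 * loop above doubles the probe offset until the word read back equals the
 * validation signature found at offset 0, i.e. until the addressing wraps.
 * With a hypothetical read callback the same idea looks like this.
 */
#if 0
static u32 example_probe_size(u32 (*read_word)(u32 off), u32 max_size)
{
	u32 magic = read_word(0);
	u32 cursize = 0x10;

	while (cursize < max_size) {
		if (read_word(cursize) == magic)
			break;		/* addressing wrapped around */
		cursize <<= 1;
	}

	return cursize;
}
#endif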
static void __devinit tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* This is confusing.  We want to operate on the
			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses.  This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU.  The 16-bit
			 * byteswap then brings the data to CPU endianness.
			 */
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
			return;
		}
	}
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}
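/*
 * Illustrative sketch (compiled out, not used by the driver): the swab16()
 * above undoes the register-access byteswap so the little-endian 16-bit
 * size stored in NVRAM ends up in CPU order regardless of host endianness.
 * The stored value is a count of 1024-byte units; the 0x0002 input below is
 * an assumed example value, not data read from a real part.
 */
#if 0
static u32 example_nvram_units_to_bytes(u32 raw_low_half)
{
	/* e.g. raw_low_half = 0x0002  ->  swab16() = 0x0200  ->  512 KB */
	return swab16((u16)(raw_low_half & 0x0000ffff)) * 1024;
}
#endif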
11664 static void __devinit
tg3_get_nvram_info(struct tg3
*tp
)
11668 nvcfg1
= tr32(NVRAM_CFG1
);
11669 if (nvcfg1
& NVRAM_CFG1_FLASHIF_ENAB
) {
11670 tg3_flag_set(tp
, FLASH
);
11672 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
11673 tw32(NVRAM_CFG1
, nvcfg1
);
11676 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
||
11677 tg3_flag(tp
, 5780_CLASS
)) {
11678 switch (nvcfg1
& NVRAM_CFG1_VENDOR_MASK
) {
11679 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED
:
11680 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
11681 tp
->nvram_pagesize
= ATMEL_AT45DB0X1B_PAGE_SIZE
;
11682 tg3_flag_set(tp
, NVRAM_BUFFERED
);
11684 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED
:
11685 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
11686 tp
->nvram_pagesize
= ATMEL_AT25F512_PAGE_SIZE
;
11688 case FLASH_VENDOR_ATMEL_EEPROM
:
11689 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
11690 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
11691 tg3_flag_set(tp
, NVRAM_BUFFERED
);
11693 case FLASH_VENDOR_ST
:
11694 tp
->nvram_jedecnum
= JEDEC_ST
;
11695 tp
->nvram_pagesize
= ST_M45PEX0_PAGE_SIZE
;
11696 tg3_flag_set(tp
, NVRAM_BUFFERED
);
11698 case FLASH_VENDOR_SAIFUN
:
11699 tp
->nvram_jedecnum
= JEDEC_SAIFUN
;
11700 tp
->nvram_pagesize
= SAIFUN_SA25F0XX_PAGE_SIZE
;
11702 case FLASH_VENDOR_SST_SMALL
:
11703 case FLASH_VENDOR_SST_LARGE
:
11704 tp
->nvram_jedecnum
= JEDEC_SST
;
11705 tp
->nvram_pagesize
= SST_25VF0X0_PAGE_SIZE
;
11709 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
11710 tp
->nvram_pagesize
= ATMEL_AT45DB0X1B_PAGE_SIZE
;
11711 tg3_flag_set(tp
, NVRAM_BUFFERED
);
static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
{
	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5752PAGE_SIZE_512:
		tp->nvram_pagesize = 512;
		break;
	case FLASH_5752PAGE_SIZE_1K:
		tp->nvram_pagesize = 1024;
		break;
	case FLASH_5752PAGE_SIZE_2K:
		tp->nvram_pagesize = 2048;
		break;
	case FLASH_5752PAGE_SIZE_4K:
		tp->nvram_pagesize = 4096;
		break;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
		break;
	}
}
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
11783 static void __devinit
tg3_get_5755_nvram_info(struct tg3
*tp
)
11785 u32 nvcfg1
, protect
= 0;
11787 nvcfg1
= tr32(NVRAM_CFG1
);
11789 /* NVRAM protection for TPM */
11790 if (nvcfg1
& (1 << 27)) {
11791 tg3_flag_set(tp
, PROTECTED_NVRAM
);
11795 nvcfg1
&= NVRAM_CFG1_5752VENDOR_MASK
;
11797 case FLASH_5755VENDOR_ATMEL_FLASH_1
:
11798 case FLASH_5755VENDOR_ATMEL_FLASH_2
:
11799 case FLASH_5755VENDOR_ATMEL_FLASH_3
:
11800 case FLASH_5755VENDOR_ATMEL_FLASH_5
:
11801 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
11802 tg3_flag_set(tp
, NVRAM_BUFFERED
);
11803 tg3_flag_set(tp
, FLASH
);
11804 tp
->nvram_pagesize
= 264;
11805 if (nvcfg1
== FLASH_5755VENDOR_ATMEL_FLASH_1
||
11806 nvcfg1
== FLASH_5755VENDOR_ATMEL_FLASH_5
)
11807 tp
->nvram_size
= (protect
? 0x3e200 :
11808 TG3_NVRAM_SIZE_512KB
);
11809 else if (nvcfg1
== FLASH_5755VENDOR_ATMEL_FLASH_2
)
11810 tp
->nvram_size
= (protect
? 0x1f200 :
11811 TG3_NVRAM_SIZE_256KB
);
11813 tp
->nvram_size
= (protect
? 0x1f200 :
11814 TG3_NVRAM_SIZE_128KB
);
11816 case FLASH_5752VENDOR_ST_M45PE10
:
11817 case FLASH_5752VENDOR_ST_M45PE20
:
11818 case FLASH_5752VENDOR_ST_M45PE40
:
11819 tp
->nvram_jedecnum
= JEDEC_ST
;
11820 tg3_flag_set(tp
, NVRAM_BUFFERED
);
11821 tg3_flag_set(tp
, FLASH
);
11822 tp
->nvram_pagesize
= 256;
11823 if (nvcfg1
== FLASH_5752VENDOR_ST_M45PE10
)
11824 tp
->nvram_size
= (protect
?
11825 TG3_NVRAM_SIZE_64KB
:
11826 TG3_NVRAM_SIZE_128KB
);
11827 else if (nvcfg1
== FLASH_5752VENDOR_ST_M45PE20
)
11828 tp
->nvram_size
= (protect
?
11829 TG3_NVRAM_SIZE_64KB
:
11830 TG3_NVRAM_SIZE_256KB
);
11832 tp
->nvram_size
= (protect
?
11833 TG3_NVRAM_SIZE_128KB
:
11834 TG3_NVRAM_SIZE_512KB
);
11839 static void __devinit
tg3_get_5787_nvram_info(struct tg3
*tp
)
11843 nvcfg1
= tr32(NVRAM_CFG1
);
11845 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
11846 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ
:
11847 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ
:
11848 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ
:
11849 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ
:
11850 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
11851 tg3_flag_set(tp
, NVRAM_BUFFERED
);
11852 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
11854 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
11855 tw32(NVRAM_CFG1
, nvcfg1
);
11857 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED
:
11858 case FLASH_5755VENDOR_ATMEL_FLASH_1
:
11859 case FLASH_5755VENDOR_ATMEL_FLASH_2
:
11860 case FLASH_5755VENDOR_ATMEL_FLASH_3
:
11861 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
11862 tg3_flag_set(tp
, NVRAM_BUFFERED
);
11863 tg3_flag_set(tp
, FLASH
);
11864 tp
->nvram_pagesize
= 264;
11866 case FLASH_5752VENDOR_ST_M45PE10
:
11867 case FLASH_5752VENDOR_ST_M45PE20
:
11868 case FLASH_5752VENDOR_ST_M45PE40
:
11869 tp
->nvram_jedecnum
= JEDEC_ST
;
11870 tg3_flag_set(tp
, NVRAM_BUFFERED
);
11871 tg3_flag_set(tp
, FLASH
);
11872 tp
->nvram_pagesize
= 256;
11877 static void __devinit
tg3_get_5761_nvram_info(struct tg3
*tp
)
11879 u32 nvcfg1
, protect
= 0;
11881 nvcfg1
= tr32(NVRAM_CFG1
);
11883 /* NVRAM protection for TPM */
11884 if (nvcfg1
& (1 << 27)) {
11885 tg3_flag_set(tp
, PROTECTED_NVRAM
);
11889 nvcfg1
&= NVRAM_CFG1_5752VENDOR_MASK
;
11891 case FLASH_5761VENDOR_ATMEL_ADB021D
:
11892 case FLASH_5761VENDOR_ATMEL_ADB041D
:
11893 case FLASH_5761VENDOR_ATMEL_ADB081D
:
11894 case FLASH_5761VENDOR_ATMEL_ADB161D
:
11895 case FLASH_5761VENDOR_ATMEL_MDB021D
:
11896 case FLASH_5761VENDOR_ATMEL_MDB041D
:
11897 case FLASH_5761VENDOR_ATMEL_MDB081D
:
11898 case FLASH_5761VENDOR_ATMEL_MDB161D
:
11899 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
11900 tg3_flag_set(tp
, NVRAM_BUFFERED
);
11901 tg3_flag_set(tp
, FLASH
);
11902 tg3_flag_set(tp
, NO_NVRAM_ADDR_TRANS
);
11903 tp
->nvram_pagesize
= 256;
11905 case FLASH_5761VENDOR_ST_A_M45PE20
:
11906 case FLASH_5761VENDOR_ST_A_M45PE40
:
11907 case FLASH_5761VENDOR_ST_A_M45PE80
:
11908 case FLASH_5761VENDOR_ST_A_M45PE16
:
11909 case FLASH_5761VENDOR_ST_M_M45PE20
:
11910 case FLASH_5761VENDOR_ST_M_M45PE40
:
11911 case FLASH_5761VENDOR_ST_M_M45PE80
:
11912 case FLASH_5761VENDOR_ST_M_M45PE16
:
11913 tp
->nvram_jedecnum
= JEDEC_ST
;
11914 tg3_flag_set(tp
, NVRAM_BUFFERED
);
11915 tg3_flag_set(tp
, FLASH
);
11916 tp
->nvram_pagesize
= 256;
11921 tp
->nvram_size
= tr32(NVRAM_ADDR_LOCKOUT
);
11924 case FLASH_5761VENDOR_ATMEL_ADB161D
:
11925 case FLASH_5761VENDOR_ATMEL_MDB161D
:
11926 case FLASH_5761VENDOR_ST_A_M45PE16
:
11927 case FLASH_5761VENDOR_ST_M_M45PE16
:
11928 tp
->nvram_size
= TG3_NVRAM_SIZE_2MB
;
11930 case FLASH_5761VENDOR_ATMEL_ADB081D
:
11931 case FLASH_5761VENDOR_ATMEL_MDB081D
:
11932 case FLASH_5761VENDOR_ST_A_M45PE80
:
11933 case FLASH_5761VENDOR_ST_M_M45PE80
:
11934 tp
->nvram_size
= TG3_NVRAM_SIZE_1MB
;
11936 case FLASH_5761VENDOR_ATMEL_ADB041D
:
11937 case FLASH_5761VENDOR_ATMEL_MDB041D
:
11938 case FLASH_5761VENDOR_ST_A_M45PE40
:
11939 case FLASH_5761VENDOR_ST_M_M45PE40
:
11940 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
11942 case FLASH_5761VENDOR_ATMEL_ADB021D
:
11943 case FLASH_5761VENDOR_ATMEL_MDB021D
:
11944 case FLASH_5761VENDOR_ST_A_M45PE20
:
11945 case FLASH_5761VENDOR_ST_M_M45PE20
:
11946 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}
11959 static void __devinit
tg3_get_57780_nvram_info(struct tg3
*tp
)
11963 nvcfg1
= tr32(NVRAM_CFG1
);
11965 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
11966 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ
:
11967 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ
:
11968 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
11969 tg3_flag_set(tp
, NVRAM_BUFFERED
);
11970 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
11972 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
11973 tw32(NVRAM_CFG1
, nvcfg1
);
11975 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED
:
11976 case FLASH_57780VENDOR_ATMEL_AT45DB011D
:
11977 case FLASH_57780VENDOR_ATMEL_AT45DB011B
:
11978 case FLASH_57780VENDOR_ATMEL_AT45DB021D
:
11979 case FLASH_57780VENDOR_ATMEL_AT45DB021B
:
11980 case FLASH_57780VENDOR_ATMEL_AT45DB041D
:
11981 case FLASH_57780VENDOR_ATMEL_AT45DB041B
:
11982 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
11983 tg3_flag_set(tp
, NVRAM_BUFFERED
);
11984 tg3_flag_set(tp
, FLASH
);
11986 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
11987 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED
:
11988 case FLASH_57780VENDOR_ATMEL_AT45DB011D
:
11989 case FLASH_57780VENDOR_ATMEL_AT45DB011B
:
11990 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
11992 case FLASH_57780VENDOR_ATMEL_AT45DB021D
:
11993 case FLASH_57780VENDOR_ATMEL_AT45DB021B
:
11994 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
11996 case FLASH_57780VENDOR_ATMEL_AT45DB041D
:
11997 case FLASH_57780VENDOR_ATMEL_AT45DB041B
:
11998 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
12002 case FLASH_5752VENDOR_ST_M45PE10
:
12003 case FLASH_5752VENDOR_ST_M45PE20
:
12004 case FLASH_5752VENDOR_ST_M45PE40
:
12005 tp
->nvram_jedecnum
= JEDEC_ST
;
12006 tg3_flag_set(tp
, NVRAM_BUFFERED
);
12007 tg3_flag_set(tp
, FLASH
);
12009 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
12010 case FLASH_5752VENDOR_ST_M45PE10
:
12011 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
12013 case FLASH_5752VENDOR_ST_M45PE20
:
12014 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
12016 case FLASH_5752VENDOR_ST_M45PE40
:
12017 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
12022 tg3_flag_set(tp
, NO_NVRAM
);
12026 tg3_nvram_get_pagesize(tp
, nvcfg1
);
12027 if (tp
->nvram_pagesize
!= 264 && tp
->nvram_pagesize
!= 528)
12028 tg3_flag_set(tp
, NO_NVRAM_ADDR_TRANS
);
12032 static void __devinit
tg3_get_5717_nvram_info(struct tg3
*tp
)
12036 nvcfg1
= tr32(NVRAM_CFG1
);
12038 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
12039 case FLASH_5717VENDOR_ATMEL_EEPROM
:
12040 case FLASH_5717VENDOR_MICRO_EEPROM
:
12041 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
12042 tg3_flag_set(tp
, NVRAM_BUFFERED
);
12043 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
12045 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
12046 tw32(NVRAM_CFG1
, nvcfg1
);
12048 case FLASH_5717VENDOR_ATMEL_MDB011D
:
12049 case FLASH_5717VENDOR_ATMEL_ADB011B
:
12050 case FLASH_5717VENDOR_ATMEL_ADB011D
:
12051 case FLASH_5717VENDOR_ATMEL_MDB021D
:
12052 case FLASH_5717VENDOR_ATMEL_ADB021B
:
12053 case FLASH_5717VENDOR_ATMEL_ADB021D
:
12054 case FLASH_5717VENDOR_ATMEL_45USPT
:
12055 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
12056 tg3_flag_set(tp
, NVRAM_BUFFERED
);
12057 tg3_flag_set(tp
, FLASH
);
12059 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
12060 case FLASH_5717VENDOR_ATMEL_MDB021D
:
12061 /* Detect size with tg3_nvram_get_size() */
12063 case FLASH_5717VENDOR_ATMEL_ADB021B
:
12064 case FLASH_5717VENDOR_ATMEL_ADB021D
:
12065 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
12068 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
12072 case FLASH_5717VENDOR_ST_M_M25PE10
:
12073 case FLASH_5717VENDOR_ST_A_M25PE10
:
12074 case FLASH_5717VENDOR_ST_M_M45PE10
:
12075 case FLASH_5717VENDOR_ST_A_M45PE10
:
12076 case FLASH_5717VENDOR_ST_M_M25PE20
:
12077 case FLASH_5717VENDOR_ST_A_M25PE20
:
12078 case FLASH_5717VENDOR_ST_M_M45PE20
:
12079 case FLASH_5717VENDOR_ST_A_M45PE20
:
12080 case FLASH_5717VENDOR_ST_25USPT
:
12081 case FLASH_5717VENDOR_ST_45USPT
:
12082 tp
->nvram_jedecnum
= JEDEC_ST
;
12083 tg3_flag_set(tp
, NVRAM_BUFFERED
);
12084 tg3_flag_set(tp
, FLASH
);
12086 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
12087 case FLASH_5717VENDOR_ST_M_M25PE20
:
12088 case FLASH_5717VENDOR_ST_M_M45PE20
:
12089 /* Detect size with tg3_nvram_get_size() */
12091 case FLASH_5717VENDOR_ST_A_M25PE20
:
12092 case FLASH_5717VENDOR_ST_A_M45PE20
:
12093 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
12096 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
12101 tg3_flag_set(tp
, NO_NVRAM
);
12105 tg3_nvram_get_pagesize(tp
, nvcfg1
);
12106 if (tp
->nvram_pagesize
!= 264 && tp
->nvram_pagesize
!= 528)
12107 tg3_flag_set(tp
, NO_NVRAM_ADDR_TRANS
);
12110 static void __devinit
tg3_get_5720_nvram_info(struct tg3
*tp
)
12112 u32 nvcfg1
, nvmpinstrp
;
12114 nvcfg1
= tr32(NVRAM_CFG1
);
12115 nvmpinstrp
= nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
;
12117 switch (nvmpinstrp
) {
12118 case FLASH_5720_EEPROM_HD
:
12119 case FLASH_5720_EEPROM_LD
:
12120 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
12121 tg3_flag_set(tp
, NVRAM_BUFFERED
);
12123 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
12124 tw32(NVRAM_CFG1
, nvcfg1
);
12125 if (nvmpinstrp
== FLASH_5720_EEPROM_HD
)
12126 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
12128 tp
->nvram_pagesize
= ATMEL_AT24C02_CHIP_SIZE
;
12130 case FLASH_5720VENDOR_M_ATMEL_DB011D
:
12131 case FLASH_5720VENDOR_A_ATMEL_DB011B
:
12132 case FLASH_5720VENDOR_A_ATMEL_DB011D
:
12133 case FLASH_5720VENDOR_M_ATMEL_DB021D
:
12134 case FLASH_5720VENDOR_A_ATMEL_DB021B
:
12135 case FLASH_5720VENDOR_A_ATMEL_DB021D
:
12136 case FLASH_5720VENDOR_M_ATMEL_DB041D
:
12137 case FLASH_5720VENDOR_A_ATMEL_DB041B
:
12138 case FLASH_5720VENDOR_A_ATMEL_DB041D
:
12139 case FLASH_5720VENDOR_M_ATMEL_DB081D
:
12140 case FLASH_5720VENDOR_A_ATMEL_DB081D
:
12141 case FLASH_5720VENDOR_ATMEL_45USPT
:
12142 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
12143 tg3_flag_set(tp
, NVRAM_BUFFERED
);
12144 tg3_flag_set(tp
, FLASH
);
12146 switch (nvmpinstrp
) {
12147 case FLASH_5720VENDOR_M_ATMEL_DB021D
:
12148 case FLASH_5720VENDOR_A_ATMEL_DB021B
:
12149 case FLASH_5720VENDOR_A_ATMEL_DB021D
:
12150 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
12152 case FLASH_5720VENDOR_M_ATMEL_DB041D
:
12153 case FLASH_5720VENDOR_A_ATMEL_DB041B
:
12154 case FLASH_5720VENDOR_A_ATMEL_DB041D
:
12155 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
12157 case FLASH_5720VENDOR_M_ATMEL_DB081D
:
12158 case FLASH_5720VENDOR_A_ATMEL_DB081D
:
12159 tp
->nvram_size
= TG3_NVRAM_SIZE_1MB
;
12162 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
12166 case FLASH_5720VENDOR_M_ST_M25PE10
:
12167 case FLASH_5720VENDOR_M_ST_M45PE10
:
12168 case FLASH_5720VENDOR_A_ST_M25PE10
:
12169 case FLASH_5720VENDOR_A_ST_M45PE10
:
12170 case FLASH_5720VENDOR_M_ST_M25PE20
:
12171 case FLASH_5720VENDOR_M_ST_M45PE20
:
12172 case FLASH_5720VENDOR_A_ST_M25PE20
:
12173 case FLASH_5720VENDOR_A_ST_M45PE20
:
12174 case FLASH_5720VENDOR_M_ST_M25PE40
:
12175 case FLASH_5720VENDOR_M_ST_M45PE40
:
12176 case FLASH_5720VENDOR_A_ST_M25PE40
:
12177 case FLASH_5720VENDOR_A_ST_M45PE40
:
12178 case FLASH_5720VENDOR_M_ST_M25PE80
:
12179 case FLASH_5720VENDOR_M_ST_M45PE80
:
12180 case FLASH_5720VENDOR_A_ST_M25PE80
:
12181 case FLASH_5720VENDOR_A_ST_M45PE80
:
12182 case FLASH_5720VENDOR_ST_25USPT
:
12183 case FLASH_5720VENDOR_ST_45USPT
:
12184 tp
->nvram_jedecnum
= JEDEC_ST
;
12185 tg3_flag_set(tp
, NVRAM_BUFFERED
);
12186 tg3_flag_set(tp
, FLASH
);
12188 switch (nvmpinstrp
) {
12189 case FLASH_5720VENDOR_M_ST_M25PE20
:
12190 case FLASH_5720VENDOR_M_ST_M45PE20
:
12191 case FLASH_5720VENDOR_A_ST_M25PE20
:
12192 case FLASH_5720VENDOR_A_ST_M45PE20
:
12193 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
12195 case FLASH_5720VENDOR_M_ST_M25PE40
:
12196 case FLASH_5720VENDOR_M_ST_M45PE40
:
12197 case FLASH_5720VENDOR_A_ST_M25PE40
:
12198 case FLASH_5720VENDOR_A_ST_M45PE40
:
12199 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
12201 case FLASH_5720VENDOR_M_ST_M25PE80
:
12202 case FLASH_5720VENDOR_M_ST_M45PE80
:
12203 case FLASH_5720VENDOR_A_ST_M25PE80
:
12204 case FLASH_5720VENDOR_A_ST_M45PE80
:
12205 tp
->nvram_size
= TG3_NVRAM_SIZE_1MB
;
12208 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
12213 tg3_flag_set(tp
, NO_NVRAM
);
12217 tg3_nvram_get_pagesize(tp
, nvcfg1
);
12218 if (tp
->nvram_pagesize
!= 264 && tp
->nvram_pagesize
!= 528)
12219 tg3_flag_set(tp
, NO_NVRAM_ADDR_TRANS
);
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void __devinit tg3_nvram_init(struct tg3 *tp)
{
	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
			tg3_get_57780_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
12288 static int tg3_nvram_write_block_using_eeprom(struct tg3
*tp
,
12289 u32 offset
, u32 len
, u8
*buf
)
12294 for (i
= 0; i
< len
; i
+= 4) {
12300 memcpy(&data
, buf
+ i
, 4);
12303 * The SEEPROM interface expects the data to always be opposite
12304 * the native endian format. We accomplish this by reversing
12305 * all the operations that would have been performed on the
12306 * data from a call to tg3_nvram_read_be32().
12308 tw32(GRC_EEPROM_DATA
, swab32(be32_to_cpu(data
)));
12310 val
= tr32(GRC_EEPROM_ADDR
);
12311 tw32(GRC_EEPROM_ADDR
, val
| EEPROM_ADDR_COMPLETE
);
12313 val
&= ~(EEPROM_ADDR_ADDR_MASK
| EEPROM_ADDR_DEVID_MASK
|
12315 tw32(GRC_EEPROM_ADDR
, val
|
12316 (0 << EEPROM_ADDR_DEVID_SHIFT
) |
12317 (addr
& EEPROM_ADDR_ADDR_MASK
) |
12318 EEPROM_ADDR_START
|
12319 EEPROM_ADDR_WRITE
);
12321 for (j
= 0; j
< 1000; j
++) {
12322 val
= tr32(GRC_EEPROM_ADDR
);
12324 if (val
& EEPROM_ADDR_COMPLETE
)
12328 if (!(val
& EEPROM_ADDR_COMPLETE
)) {
12337 /* offset and length are dword aligned */
12338 static int tg3_nvram_write_block_unbuffered(struct tg3
*tp
, u32 offset
, u32 len
,
12342 u32 pagesize
= tp
->nvram_pagesize
;
12343 u32 pagemask
= pagesize
- 1;
12347 tmp
= kmalloc(pagesize
, GFP_KERNEL
);
12353 u32 phy_addr
, page_off
, size
;
12355 phy_addr
= offset
& ~pagemask
;
12357 for (j
= 0; j
< pagesize
; j
+= 4) {
12358 ret
= tg3_nvram_read_be32(tp
, phy_addr
+ j
,
12359 (__be32
*) (tmp
+ j
));
12366 page_off
= offset
& pagemask
;
12373 memcpy(tmp
+ page_off
, buf
, size
);
12375 offset
= offset
+ (pagesize
- page_off
);
12377 tg3_enable_nvram_access(tp
);
12380 * Before we can erase the flash page, we need
12381 * to issue a special "write enable" command.
12383 nvram_cmd
= NVRAM_CMD_WREN
| NVRAM_CMD_GO
| NVRAM_CMD_DONE
;
12385 if (tg3_nvram_exec_cmd(tp
, nvram_cmd
))
12388 /* Erase the target page */
12389 tw32(NVRAM_ADDR
, phy_addr
);
12391 nvram_cmd
= NVRAM_CMD_GO
| NVRAM_CMD_DONE
| NVRAM_CMD_WR
|
12392 NVRAM_CMD_FIRST
| NVRAM_CMD_LAST
| NVRAM_CMD_ERASE
;
12394 if (tg3_nvram_exec_cmd(tp
, nvram_cmd
))
12397 /* Issue another write enable to start the write. */
12398 nvram_cmd
= NVRAM_CMD_WREN
| NVRAM_CMD_GO
| NVRAM_CMD_DONE
;
12400 if (tg3_nvram_exec_cmd(tp
, nvram_cmd
))
12403 for (j
= 0; j
< pagesize
; j
+= 4) {
12406 data
= *((__be32
*) (tmp
+ j
));
12408 tw32(NVRAM_WRDATA
, be32_to_cpu(data
));
12410 tw32(NVRAM_ADDR
, phy_addr
+ j
);
12412 nvram_cmd
= NVRAM_CMD_GO
| NVRAM_CMD_DONE
|
12416 nvram_cmd
|= NVRAM_CMD_FIRST
;
12417 else if (j
== (pagesize
- 4))
12418 nvram_cmd
|= NVRAM_CMD_LAST
;
12420 if ((ret
= tg3_nvram_exec_cmd(tp
, nvram_cmd
)))
12427 nvram_cmd
= NVRAM_CMD_WRDI
| NVRAM_CMD_GO
| NVRAM_CMD_DONE
;
12428 tg3_nvram_exec_cmd(tp
, nvram_cmd
);
12435 /* offset and length are dword aligned */
12436 static int tg3_nvram_write_block_buffered(struct tg3
*tp
, u32 offset
, u32 len
,
12441 for (i
= 0; i
< len
; i
+= 4, offset
+= 4) {
12442 u32 page_off
, phy_addr
, nvram_cmd
;
12445 memcpy(&data
, buf
+ i
, 4);
12446 tw32(NVRAM_WRDATA
, be32_to_cpu(data
));
12448 page_off
= offset
% tp
->nvram_pagesize
;
12450 phy_addr
= tg3_nvram_phys_addr(tp
, offset
);
12452 tw32(NVRAM_ADDR
, phy_addr
);
12454 nvram_cmd
= NVRAM_CMD_GO
| NVRAM_CMD_DONE
| NVRAM_CMD_WR
;
12456 if (page_off
== 0 || i
== 0)
12457 nvram_cmd
|= NVRAM_CMD_FIRST
;
12458 if (page_off
== (tp
->nvram_pagesize
- 4))
12459 nvram_cmd
|= NVRAM_CMD_LAST
;
12461 if (i
== (len
- 4))
12462 nvram_cmd
|= NVRAM_CMD_LAST
;
12464 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5752
&&
12465 !tg3_flag(tp
, 5755_PLUS
) &&
12466 (tp
->nvram_jedecnum
== JEDEC_ST
) &&
12467 (nvram_cmd
& NVRAM_CMD_FIRST
)) {
12469 if ((ret
= tg3_nvram_exec_cmd(tp
,
12470 NVRAM_CMD_WREN
| NVRAM_CMD_GO
|
12475 if (!tg3_flag(tp
, FLASH
)) {
12476 /* We always do complete word writes to eeprom. */
12477 nvram_cmd
|= (NVRAM_CMD_FIRST
| NVRAM_CMD_LAST
);
12480 if ((ret
= tg3_nvram_exec_cmd(tp
, nvram_cmd
)))
12486 /* offset and length are dword aligned */
12487 static int tg3_nvram_write_block(struct tg3
*tp
, u32 offset
, u32 len
, u8
*buf
)
12491 if (tg3_flag(tp
, EEPROM_WRITE_PROT
)) {
12492 tw32_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
&
12493 ~GRC_LCLCTRL_GPIO_OUTPUT1
);
12497 if (!tg3_flag(tp
, NVRAM
)) {
12498 ret
= tg3_nvram_write_block_using_eeprom(tp
, offset
, len
, buf
);
12502 ret
= tg3_nvram_lock(tp
);
12506 tg3_enable_nvram_access(tp
);
12507 if (tg3_flag(tp
, 5750_PLUS
) && !tg3_flag(tp
, PROTECTED_NVRAM
))
12508 tw32(NVRAM_WRITE1
, 0x406);
12510 grc_mode
= tr32(GRC_MODE
);
12511 tw32(GRC_MODE
, grc_mode
| GRC_MODE_NVRAM_WR_ENABLE
);
12513 if (tg3_flag(tp
, NVRAM_BUFFERED
) || !tg3_flag(tp
, FLASH
)) {
12514 ret
= tg3_nvram_write_block_buffered(tp
, offset
, len
,
12517 ret
= tg3_nvram_write_block_unbuffered(tp
, offset
, len
,
12521 grc_mode
= tr32(GRC_MODE
);
12522 tw32(GRC_MODE
, grc_mode
& ~GRC_MODE_NVRAM_WR_ENABLE
);
12524 tg3_disable_nvram_access(tp
);
12525 tg3_nvram_unlock(tp
);
12528 if (tg3_flag(tp
, EEPROM_WRITE_PROT
)) {
12529 tw32_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
);
12536 struct subsys_tbl_ent
{
12537 u16 subsys_vendor
, subsys_devid
;
12541 static struct subsys_tbl_ent subsys_id_to_phy_id
[] __devinitdata
= {
12542 /* Broadcom boards. */
12543 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
12544 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6
, TG3_PHY_ID_BCM5401
},
12545 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
12546 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5
, TG3_PHY_ID_BCM5701
},
12547 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
12548 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6
, TG3_PHY_ID_BCM8002
},
12549 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
12550 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9
, 0 },
12551 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
12552 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1
, TG3_PHY_ID_BCM5701
},
12553 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
12554 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8
, TG3_PHY_ID_BCM5701
},
12555 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
12556 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7
, 0 },
12557 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
12558 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10
, TG3_PHY_ID_BCM5701
},
12559 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
12560 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12
, TG3_PHY_ID_BCM5701
},
12561 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
12562 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1
, TG3_PHY_ID_BCM5703
},
12563 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
12564 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2
, TG3_PHY_ID_BCM5703
},
12567 { TG3PCI_SUBVENDOR_ID_3COM
,
12568 TG3PCI_SUBDEVICE_ID_3COM_3C996T
, TG3_PHY_ID_BCM5401
},
12569 { TG3PCI_SUBVENDOR_ID_3COM
,
12570 TG3PCI_SUBDEVICE_ID_3COM_3C996BT
, TG3_PHY_ID_BCM5701
},
12571 { TG3PCI_SUBVENDOR_ID_3COM
,
12572 TG3PCI_SUBDEVICE_ID_3COM_3C996SX
, 0 },
12573 { TG3PCI_SUBVENDOR_ID_3COM
,
12574 TG3PCI_SUBDEVICE_ID_3COM_3C1000T
, TG3_PHY_ID_BCM5701
},
12575 { TG3PCI_SUBVENDOR_ID_3COM
,
12576 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01
, TG3_PHY_ID_BCM5701
},
12579 { TG3PCI_SUBVENDOR_ID_DELL
,
12580 TG3PCI_SUBDEVICE_ID_DELL_VIPER
, TG3_PHY_ID_BCM5401
},
12581 { TG3PCI_SUBVENDOR_ID_DELL
,
12582 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR
, TG3_PHY_ID_BCM5401
},
12583 { TG3PCI_SUBVENDOR_ID_DELL
,
12584 TG3PCI_SUBDEVICE_ID_DELL_MERLOT
, TG3_PHY_ID_BCM5411
},
12585 { TG3PCI_SUBVENDOR_ID_DELL
,
12586 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT
, TG3_PHY_ID_BCM5411
},
12588 /* Compaq boards. */
12589 { TG3PCI_SUBVENDOR_ID_COMPAQ
,
12590 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE
, TG3_PHY_ID_BCM5701
},
12591 { TG3PCI_SUBVENDOR_ID_COMPAQ
,
12592 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2
, TG3_PHY_ID_BCM5701
},
12593 { TG3PCI_SUBVENDOR_ID_COMPAQ
,
12594 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING
, 0 },
12595 { TG3PCI_SUBVENDOR_ID_COMPAQ
,
12596 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780
, TG3_PHY_ID_BCM5701
},
12597 { TG3PCI_SUBVENDOR_ID_COMPAQ
,
12598 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2
, TG3_PHY_ID_BCM5701
},
12601 { TG3PCI_SUBVENDOR_ID_IBM
,
12602 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2
, 0 }
static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}
	return NULL;
}
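/*
 * Illustrative sketch (compiled out, not used by the driver): callers treat
 * a NULL return as "no match" and otherwise pull the PHY ID out of the
 * table entry, as tg3_phy_probe() does further below when the EEPROM
 * carries no usable PHY ID.  The helper name is hypothetical.
 */
#if 0
static u32 example_phy_id_from_subsys(struct tg3 *tp, u32 fallback)
{
	struct subsys_tbl_ent *p = tg3_lookup_by_subsys(tp);

	return p ? p->phy_id : fallback;
}
#endif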
12619 static void __devinit
tg3_get_eeprom_hw_cfg(struct tg3
*tp
)
12624 /* On some early chips the SRAM cannot be accessed in D3hot state,
12625 * so need make sure we're in D0.
12627 pci_read_config_word(tp
->pdev
, tp
->pm_cap
+ PCI_PM_CTRL
, &pmcsr
);
12628 pmcsr
&= ~PCI_PM_CTRL_STATE_MASK
;
12629 pci_write_config_word(tp
->pdev
, tp
->pm_cap
+ PCI_PM_CTRL
, pmcsr
);
12632 /* Make sure register accesses (indirect or otherwise)
12633 * will function correctly.
12635 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
12636 tp
->misc_host_ctrl
);
12638 /* The memory arbiter has to be enabled in order for SRAM accesses
12639 * to succeed. Normally on powerup the tg3 chip firmware will make
12640 * sure it is enabled, but other entities such as system netboot
12641 * code might disable it.
12643 val
= tr32(MEMARB_MODE
);
12644 tw32(MEMARB_MODE
, val
| MEMARB_MODE_ENABLE
);
12646 tp
->phy_id
= TG3_PHY_ID_INVALID
;
12647 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
12649 /* Assume an onboard device and WOL capable by default. */
12650 tg3_flag_set(tp
, EEPROM_WRITE_PROT
);
12651 tg3_flag_set(tp
, WOL_CAP
);
12653 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
12654 if (!(tr32(PCIE_TRANSACTION_CFG
) & PCIE_TRANS_CFG_LOM
)) {
12655 tg3_flag_clear(tp
, EEPROM_WRITE_PROT
);
12656 tg3_flag_set(tp
, IS_NIC
);
12658 val
= tr32(VCPU_CFGSHDW
);
12659 if (val
& VCPU_CFGSHDW_ASPM_DBNC
)
12660 tg3_flag_set(tp
, ASPM_WORKAROUND
);
12661 if ((val
& VCPU_CFGSHDW_WOL_ENABLE
) &&
12662 (val
& VCPU_CFGSHDW_WOL_MAGPKT
)) {
12663 tg3_flag_set(tp
, WOL_ENABLE
);
12664 device_set_wakeup_enable(&tp
->pdev
->dev
, true);
12669 tg3_read_mem(tp
, NIC_SRAM_DATA_SIG
, &val
);
12670 if (val
== NIC_SRAM_DATA_SIG_MAGIC
) {
12671 u32 nic_cfg
, led_cfg
;
12672 u32 nic_phy_id
, ver
, cfg2
= 0, cfg4
= 0, eeprom_phy_id
;
12673 int eeprom_phy_serdes
= 0;
12675 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG
, &nic_cfg
);
12676 tp
->nic_sram_data_cfg
= nic_cfg
;
12678 tg3_read_mem(tp
, NIC_SRAM_DATA_VER
, &ver
);
12679 ver
>>= NIC_SRAM_DATA_VER_SHIFT
;
12680 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
&&
12681 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
&&
12682 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5703
&&
12683 (ver
> 0) && (ver
< 0x100))
12684 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG_2
, &cfg2
);
12686 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
)
12687 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG_4
, &cfg4
);
12689 if ((nic_cfg
& NIC_SRAM_DATA_CFG_PHY_TYPE_MASK
) ==
12690 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER
)
12691 eeprom_phy_serdes
= 1;
12693 tg3_read_mem(tp
, NIC_SRAM_DATA_PHY_ID
, &nic_phy_id
);
12694 if (nic_phy_id
!= 0) {
12695 u32 id1
= nic_phy_id
& NIC_SRAM_DATA_PHY_ID1_MASK
;
12696 u32 id2
= nic_phy_id
& NIC_SRAM_DATA_PHY_ID2_MASK
;
12698 eeprom_phy_id
= (id1
>> 16) << 10;
12699 eeprom_phy_id
|= (id2
& 0xfc00) << 16;
12700 eeprom_phy_id
|= (id2
& 0x03ff) << 0;
12704 tp
->phy_id
= eeprom_phy_id
;
12705 if (eeprom_phy_serdes
) {
12706 if (!tg3_flag(tp
, 5705_PLUS
))
12707 tp
->phy_flags
|= TG3_PHYFLG_PHY_SERDES
;
12709 tp
->phy_flags
|= TG3_PHYFLG_MII_SERDES
;
12712 if (tg3_flag(tp
, 5750_PLUS
))
12713 led_cfg
= cfg2
& (NIC_SRAM_DATA_CFG_LED_MODE_MASK
|
12714 SHASTA_EXT_LED_MODE_MASK
);
12716 led_cfg
= nic_cfg
& NIC_SRAM_DATA_CFG_LED_MODE_MASK
;
12720 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1
:
12721 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
12724 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2
:
12725 tp
->led_ctrl
= LED_CTRL_MODE_PHY_2
;
12728 case NIC_SRAM_DATA_CFG_LED_MODE_MAC
:
12729 tp
->led_ctrl
= LED_CTRL_MODE_MAC
;
12731 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
12732 * read on some older 5700/5701 bootcode.
12734 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
12736 GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
12738 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
12742 case SHASTA_EXT_LED_SHARED
:
12743 tp
->led_ctrl
= LED_CTRL_MODE_SHARED
;
12744 if (tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A0
&&
12745 tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A1
)
12746 tp
->led_ctrl
|= (LED_CTRL_MODE_PHY_1
|
12747 LED_CTRL_MODE_PHY_2
);
12750 case SHASTA_EXT_LED_MAC
:
12751 tp
->led_ctrl
= LED_CTRL_MODE_SHASTA_MAC
;
12754 case SHASTA_EXT_LED_COMBO
:
12755 tp
->led_ctrl
= LED_CTRL_MODE_COMBO
;
12756 if (tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A0
)
12757 tp
->led_ctrl
|= (LED_CTRL_MODE_PHY_1
|
12758 LED_CTRL_MODE_PHY_2
);
12763 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
12764 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) &&
12765 tp
->pdev
->subsystem_vendor
== PCI_VENDOR_ID_DELL
)
12766 tp
->led_ctrl
= LED_CTRL_MODE_PHY_2
;
12768 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5784_AX
)
12769 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
12771 if (nic_cfg
& NIC_SRAM_DATA_CFG_EEPROM_WP
) {
12772 tg3_flag_set(tp
, EEPROM_WRITE_PROT
);
12773 if ((tp
->pdev
->subsystem_vendor
==
12774 PCI_VENDOR_ID_ARIMA
) &&
12775 (tp
->pdev
->subsystem_device
== 0x205a ||
12776 tp
->pdev
->subsystem_device
== 0x2063))
12777 tg3_flag_clear(tp
, EEPROM_WRITE_PROT
);
12779 tg3_flag_clear(tp
, EEPROM_WRITE_PROT
);
12780 tg3_flag_set(tp
, IS_NIC
);
12783 if (nic_cfg
& NIC_SRAM_DATA_CFG_ASF_ENABLE
) {
12784 tg3_flag_set(tp
, ENABLE_ASF
);
12785 if (tg3_flag(tp
, 5750_PLUS
))
12786 tg3_flag_set(tp
, ASF_NEW_HANDSHAKE
);
12789 if ((nic_cfg
& NIC_SRAM_DATA_CFG_APE_ENABLE
) &&
12790 tg3_flag(tp
, 5750_PLUS
))
12791 tg3_flag_set(tp
, ENABLE_APE
);
12793 if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
&&
12794 !(nic_cfg
& NIC_SRAM_DATA_CFG_FIBER_WOL
))
12795 tg3_flag_clear(tp
, WOL_CAP
);
12797 if (tg3_flag(tp
, WOL_CAP
) &&
12798 (nic_cfg
& NIC_SRAM_DATA_CFG_WOL_ENABLE
)) {
12799 tg3_flag_set(tp
, WOL_ENABLE
);
12800 device_set_wakeup_enable(&tp
->pdev
->dev
, true);
12803 if (cfg2
& (1 << 17))
12804 tp
->phy_flags
|= TG3_PHYFLG_CAPACITIVE_COUPLING
;
12806 /* serdes signal pre-emphasis in register 0x590 set by */
12807 /* bootcode if bit 18 is set */
12808 if (cfg2
& (1 << 18))
12809 tp
->phy_flags
|= TG3_PHYFLG_SERDES_PREEMPHASIS
;
12811 if ((tg3_flag(tp
, 57765_PLUS
) ||
12812 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
&&
12813 GET_CHIP_REV(tp
->pci_chip_rev_id
) != CHIPREV_5784_AX
)) &&
12814 (cfg2
& NIC_SRAM_DATA_CFG_2_APD_EN
))
12815 tp
->phy_flags
|= TG3_PHYFLG_ENABLE_APD
;
12817 if (tg3_flag(tp
, PCI_EXPRESS
) &&
12818 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5785
&&
12819 !tg3_flag(tp
, 57765_PLUS
)) {
12822 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG_3
, &cfg3
);
12823 if (cfg3
& NIC_SRAM_ASPM_DEBOUNCE
)
12824 tg3_flag_set(tp
, ASPM_WORKAROUND
);
12827 if (cfg4
& NIC_SRAM_RGMII_INBAND_DISABLE
)
12828 tg3_flag_set(tp
, RGMII_INBAND_DISABLE
);
12829 if (cfg4
& NIC_SRAM_RGMII_EXT_IBND_RX_EN
)
12830 tg3_flag_set(tp
, RGMII_EXT_IBND_RX_EN
);
12831 if (cfg4
& NIC_SRAM_RGMII_EXT_IBND_TX_EN
)
12832 tg3_flag_set(tp
, RGMII_EXT_IBND_TX_EN
);
12835 if (tg3_flag(tp
, WOL_CAP
))
12836 device_set_wakeup_enable(&tp
->pdev
->dev
,
12837 tg3_flag(tp
, WOL_ENABLE
));
12839 device_set_wakeup_capable(&tp
->pdev
->dev
, false);
static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
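/*
 * Illustrative sketch (compiled out, not used by the driver): the 32-bit
 * gphy configuration sits across a 32-bit OTP word boundary, so the
 * function above keeps the low 16 bits of the first word as the top half
 * and the high 16 bits of the second word as the bottom half.  In
 * isolation the merge is simply:
 */
#if 0
static u32 example_merge_otp_halves(u32 first_word, u32 second_word)
{
	return ((first_word & 0x0000ffff) << 16) | (second_word >> 16);
}
#endif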
static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg |
		  ADVERTISED_Pause;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		adv |= ADVERTISED_1000baseT_Half |
		       ADVERTISED_1000baseT_Full;

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_INVALID;
	tp->link_config.duplex = DUPLEX_INVALID;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_INVALID;
	tp->link_config.active_duplex = DUPLEX_INVALID;
	tp->link_config.orig_speed = SPEED_INVALID;
	tp->link_config.orig_duplex = DUPLEX_INVALID;
	tp->link_config.orig_autoneg = AUTONEG_INVALID;
}
12920 static int __devinit
tg3_phy_probe(struct tg3
*tp
)
12922 u32 hw_phy_id_1
, hw_phy_id_2
;
12923 u32 hw_phy_id
, hw_phy_id_masked
;
12926 /* flow control autonegotiation is default behavior */
12927 tg3_flag_set(tp
, PAUSE_AUTONEG
);
12928 tp
->link_config
.flowctrl
= FLOW_CTRL_TX
| FLOW_CTRL_RX
;
12930 if (tg3_flag(tp
, USE_PHYLIB
))
12931 return tg3_phy_init(tp
);
12933 /* Reading the PHY ID register can conflict with ASF
12934 * firmware access to the PHY hardware.
12937 if (tg3_flag(tp
, ENABLE_ASF
) || tg3_flag(tp
, ENABLE_APE
)) {
12938 hw_phy_id
= hw_phy_id_masked
= TG3_PHY_ID_INVALID
;
12940 /* Now read the physical PHY_ID from the chip and verify
12941 * that it is sane. If it doesn't look good, we fall back
12942 * to either the hard-coded table based PHY_ID and failing
12943 * that the value found in the eeprom area.
12945 err
|= tg3_readphy(tp
, MII_PHYSID1
, &hw_phy_id_1
);
12946 err
|= tg3_readphy(tp
, MII_PHYSID2
, &hw_phy_id_2
);
12948 hw_phy_id
= (hw_phy_id_1
& 0xffff) << 10;
12949 hw_phy_id
|= (hw_phy_id_2
& 0xfc00) << 16;
12950 hw_phy_id
|= (hw_phy_id_2
& 0x03ff) << 0;
12952 hw_phy_id_masked
= hw_phy_id
& TG3_PHY_ID_MASK
;
12955 if (!err
&& TG3_KNOWN_PHY_ID(hw_phy_id_masked
)) {
12956 tp
->phy_id
= hw_phy_id
;
12957 if (hw_phy_id_masked
== TG3_PHY_ID_BCM8002
)
12958 tp
->phy_flags
|= TG3_PHYFLG_PHY_SERDES
;
12960 tp
->phy_flags
&= ~TG3_PHYFLG_PHY_SERDES
;
12962 if (tp
->phy_id
!= TG3_PHY_ID_INVALID
) {
12963 /* Do nothing, phy ID already set up in
12964 * tg3_get_eeprom_hw_cfg().
12967 struct subsys_tbl_ent
*p
;
12969 /* No eeprom signature? Try the hardcoded
12970 * subsys device table.
12972 p
= tg3_lookup_by_subsys(tp
);
12976 tp
->phy_id
= p
->phy_id
;
12978 tp
->phy_id
== TG3_PHY_ID_BCM8002
)
12979 tp
->phy_flags
|= TG3_PHYFLG_PHY_SERDES
;
12983 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
) &&
12984 ((tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5718
&&
12985 tp
->pci_chip_rev_id
!= CHIPREV_ID_5717_A0
) ||
12986 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57765
&&
12987 tp
->pci_chip_rev_id
!= CHIPREV_ID_57765_A0
)))
12988 tp
->phy_flags
|= TG3_PHYFLG_EEE_CAP
;
12990 tg3_phy_init_link_config(tp
);
12992 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
) &&
12993 !tg3_flag(tp
, ENABLE_APE
) &&
12994 !tg3_flag(tp
, ENABLE_ASF
)) {
12997 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
12998 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
12999 (bmsr
& BMSR_LSTATUS
))
13000 goto skip_phy_reset
;
13002 err
= tg3_phy_reset(tp
);
13006 tg3_phy_set_wirespeed(tp
);
13008 mask
= (ADVERTISED_10baseT_Half
| ADVERTISED_10baseT_Full
|
13009 ADVERTISED_100baseT_Half
| ADVERTISED_100baseT_Full
|
13010 ADVERTISED_1000baseT_Half
| ADVERTISED_1000baseT_Full
);
13011 if (!tg3_copper_is_advertising_all(tp
, mask
)) {
13012 tg3_phy_autoneg_cfg(tp
, tp
->link_config
.advertising
,
13013 tp
->link_config
.flowctrl
);
13015 tg3_writephy(tp
, MII_BMCR
,
13016 BMCR_ANENABLE
| BMCR_ANRESTART
);
13021 if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5401
) {
13022 err
= tg3_init_5401phy_dsp(tp
);
13026 err
= tg3_init_5401phy_dsp(tp
);
static void __devinit tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	int j, i;

	vpd_data = (u8 *)tg3_vpd_readblock(tp);
	if (!vpd_data)
		goto out_no_vpd;

	i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
			     PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > TG3_NVM_VPD_LEN)
		goto out_not_found;

	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		memcpy(tp->fw_ver, &vpd_data[j], len);
		strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > TG3_NVM_VPD_LEN)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    (val & 0xe0000000) != 0)
		return 0;

	return 1;
}
static void __devinit tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;

		for (i = 0; i < 16; i += 4) {
			__be32 v;

			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}
static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;

	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;

		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
static void __devinit tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
		tg3_flag_set(tp, APE_HAS_NCSI);
		fwtype = "NCSI";
	} else {
		fwtype = "DASH";
	}

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);
	else
		return;

	if (!tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || vpd_vers)
		goto done;

	tg3_read_mgmtfw_ver(tp);

done:
	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
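
/* A minimal sketch (not part of the driver) of the bounded string-append
 * idiom the version readers above rely on: strncat() is limited so the
 * terminating NUL always fits, and snprintf() is given only the space that
 * remains. The helper name and parameters are hypothetical.
 */
static void example_append_ver(char *buf, size_t bufsz, const char *tag,
			       int major, int minor)
{
	size_t used = strlen(buf);

	/* Leave room for the trailing NUL when appending the tag. */
	strncat(buf, tag, bufsz - used - 1);

	/* snprintf() itself guarantees NUL termination within the bound. */
	used = strlen(buf);
	snprintf(&buf[used], bufsz - used, " v%d.%02d", major, minor);
}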
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);

static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}
static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
static int __devinit tg3_get_invariants(struct tg3 *tp)
{
	u32 misc_ctrl_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u16 pci_cmd;

	/* Force memory write invalidate off. If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size. The Broadcom driver has this
	 * workaround but turns MWI off all the time and so never uses
	 * it. This seems to suggest that the workaround is insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
	 * has the register indirect write enable bit set before
	 * we try to access any of the MMIO registers. It is also
	 * critical that the PCI-X hw workaround situation is decided
	 * before that as well.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			      &misc_ctrl_reg);

	tp->pci_chip_rev_id = (misc_ctrl_reg >>
			       MISC_HOST_CTRL_CHIPREV_SHIFT);
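
	/* For reference, a minimal sketch of the indirect access path the
	 * comment above alludes to: when MMIO cannot be trusted, registers
	 * are reached through two PCI config-space words, the same pattern
	 * tg3_write_indirect_reg32() uses elsewhere in this file:
	 *
	 *	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	 *	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	 *
	 * MISC_HOST_CTRL_INDIR_ACCESS must already be set in
	 * TG3PCI_MISC_HOST_CTRL for that path to work, which is why this
	 * register is read (and later written back) so early here.
	 */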
13448 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_USE_PROD_ID_REG
) {
13449 u32 prod_id_asic_rev
;
13451 if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5717
||
13452 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5718
||
13453 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5719
||
13454 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5720
)
13455 pci_read_config_dword(tp
->pdev
,
13456 TG3PCI_GEN2_PRODID_ASICREV
,
13457 &prod_id_asic_rev
);
13458 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57781
||
13459 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57785
||
13460 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57761
||
13461 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57765
||
13462 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57791
||
13463 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57795
)
13464 pci_read_config_dword(tp
->pdev
,
13465 TG3PCI_GEN15_PRODID_ASICREV
,
13466 &prod_id_asic_rev
);
13468 pci_read_config_dword(tp
->pdev
, TG3PCI_PRODID_ASICREV
,
13469 &prod_id_asic_rev
);
13471 tp
->pci_chip_rev_id
= prod_id_asic_rev
;
13474 /* Wrong chip ID in 5752 A0. This code can be removed later
13475 * as A0 is not in production.
13477 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5752_A0_HW
)
13478 tp
->pci_chip_rev_id
= CHIPREV_ID_5752_A0
;
13480 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13481 * we need to disable memory and use config. cycles
13482 * only to access all registers. The 5702/03 chips
13483 * can mistakenly decode the special cycles from the
13484 * ICH chipsets as memory write cycles, causing corruption
13485 * of register and memory space. Only certain ICH bridges
13486 * will drive special cycles with non-zero data during the
13487 * address phase which can fall within the 5703's address
13488 * range. This is not an ICH bug as the PCI spec allows
13489 * non-zero address during special cycles. However, only
13490 * these ICH bridges are known to drive non-zero addresses
13491 * during special cycles.
13493 * Since special cycles do not cross PCI bridges, we only
13494 * enable this workaround if the 5703 is on the secondary
13495 * bus of these ICH bridges.
13497 if ((tp
->pci_chip_rev_id
== CHIPREV_ID_5703_A1
) ||
13498 (tp
->pci_chip_rev_id
== CHIPREV_ID_5703_A2
)) {
13499 static struct tg3_dev_id
{
13503 } ich_chipsets
[] = {
13504 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801AA_8
,
13506 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801AB_8
,
13508 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801BA_11
,
13510 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801BA_6
,
13514 struct tg3_dev_id
*pci_id
= &ich_chipsets
[0];
13515 struct pci_dev
*bridge
= NULL
;
13517 while (pci_id
->vendor
!= 0) {
13518 bridge
= pci_get_device(pci_id
->vendor
, pci_id
->device
,
13524 if (pci_id
->rev
!= PCI_ANY_ID
) {
13525 if (bridge
->revision
> pci_id
->rev
)
13528 if (bridge
->subordinate
&&
13529 (bridge
->subordinate
->number
==
13530 tp
->pdev
->bus
->number
)) {
13531 tg3_flag_set(tp
, ICH_WORKAROUND
);
13532 pci_dev_put(bridge
);
13538 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
13539 static struct tg3_dev_id
{
13542 } bridge_chipsets
[] = {
13543 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_PXH_0
},
13544 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_PXH_1
},
13547 struct tg3_dev_id
*pci_id
= &bridge_chipsets
[0];
13548 struct pci_dev
*bridge
= NULL
;
13550 while (pci_id
->vendor
!= 0) {
13551 bridge
= pci_get_device(pci_id
->vendor
,
13558 if (bridge
->subordinate
&&
13559 (bridge
->subordinate
->number
<=
13560 tp
->pdev
->bus
->number
) &&
13561 (bridge
->subordinate
->subordinate
>=
13562 tp
->pdev
->bus
->number
)) {
13563 tg3_flag_set(tp
, 5701_DMA_BUG
);
13564 pci_dev_put(bridge
);
	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
	 * DMA addresses > 40-bit. This bridge may have other additional
	 * 57xx devices behind it in some 4-port NIC designs for example.
	 * Any tg3 device found behind the bridge will also need the 40-bit
	 * DMA workaround.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		tg3_flag_set(tp, 5780_CLASS);
		tg3_flag_set(tp, 40BIT_DMA_BUG);
		tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13582 struct pci_dev
*bridge
= NULL
;
13585 bridge
= pci_get_device(PCI_VENDOR_ID_SERVERWORKS
,
13586 PCI_DEVICE_ID_SERVERWORKS_EPB
,
13588 if (bridge
&& bridge
->subordinate
&&
13589 (bridge
->subordinate
->number
<=
13590 tp
->pdev
->bus
->number
) &&
13591 (bridge
->subordinate
->subordinate
>=
13592 tp
->pdev
->bus
->number
)) {
13593 tg3_flag_set(tp
, 40BIT_DMA_BUG
);
13594 pci_dev_put(bridge
);
13600 /* Initialize misc host control in PCI block. */
13601 tp
->misc_host_ctrl
|= (misc_ctrl_reg
&
13602 MISC_HOST_CTRL_CHIPREV
);
13603 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
13604 tp
->misc_host_ctrl
);
13606 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
||
13607 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
||
13608 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5717
||
13609 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
)
13610 tp
->pdev_peer
= tg3_find_peer(tp
);
13612 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5717
||
13613 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
||
13614 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
)
13615 tg3_flag_set(tp
, 5717_PLUS
);
13617 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57765
||
13618 tg3_flag(tp
, 5717_PLUS
))
13619 tg3_flag_set(tp
, 57765_PLUS
);
13621 /* Intentionally exclude ASIC_REV_5906 */
13622 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
||
13623 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5787
||
13624 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
||
13625 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
||
13626 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
||
13627 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
||
13628 tg3_flag(tp
, 57765_PLUS
))
13629 tg3_flag_set(tp
, 5755_PLUS
);
13631 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
||
13632 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
||
13633 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
||
13634 tg3_flag(tp
, 5755_PLUS
) ||
13635 tg3_flag(tp
, 5780_CLASS
))
13636 tg3_flag_set(tp
, 5750_PLUS
);
13638 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
||
13639 tg3_flag(tp
, 5750_PLUS
))
13640 tg3_flag_set(tp
, 5705_PLUS
);
13642 /* Determine TSO capabilities */
13643 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
)
13644 ; /* Do nothing. HW bug. */
13645 else if (tg3_flag(tp
, 57765_PLUS
))
13646 tg3_flag_set(tp
, HW_TSO_3
);
13647 else if (tg3_flag(tp
, 5755_PLUS
) ||
13648 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
)
13649 tg3_flag_set(tp
, HW_TSO_2
);
13650 else if (tg3_flag(tp
, 5750_PLUS
)) {
13651 tg3_flag_set(tp
, HW_TSO_1
);
13652 tg3_flag_set(tp
, TSO_BUG
);
13653 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
&&
13654 tp
->pci_chip_rev_id
>= CHIPREV_ID_5750_C2
)
13655 tg3_flag_clear(tp
, TSO_BUG
);
13656 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
&&
13657 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
&&
13658 tp
->pci_chip_rev_id
!= CHIPREV_ID_5705_A0
) {
13659 tg3_flag_set(tp
, TSO_BUG
);
13660 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
)
13661 tp
->fw_needed
= FIRMWARE_TG3TSO5
;
13663 tp
->fw_needed
= FIRMWARE_TG3TSO
;
13666 /* Selectively allow TSO based on operating conditions */
13667 if (tg3_flag(tp
, HW_TSO_1
) ||
13668 tg3_flag(tp
, HW_TSO_2
) ||
13669 tg3_flag(tp
, HW_TSO_3
) ||
13670 (tp
->fw_needed
&& !tg3_flag(tp
, ENABLE_ASF
)))
13671 tg3_flag_set(tp
, TSO_CAPABLE
);
13673 tg3_flag_clear(tp
, TSO_CAPABLE
);
13674 tg3_flag_clear(tp
, TSO_BUG
);
13675 tp
->fw_needed
= NULL
;
13678 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
)
13679 tp
->fw_needed
= FIRMWARE_TG3
;
13683 if (tg3_flag(tp
, 5750_PLUS
)) {
13684 tg3_flag_set(tp
, SUPPORT_MSI
);
13685 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5750_AX
||
13686 GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5750_BX
||
13687 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
&&
13688 tp
->pci_chip_rev_id
<= CHIPREV_ID_5714_A2
&&
13689 tp
->pdev_peer
== tp
->pdev
))
13690 tg3_flag_clear(tp
, SUPPORT_MSI
);
13692 if (tg3_flag(tp
, 5755_PLUS
) ||
13693 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
13694 tg3_flag_set(tp
, 1SHOT_MSI
);
13697 if (tg3_flag(tp
, 57765_PLUS
)) {
13698 tg3_flag_set(tp
, SUPPORT_MSIX
);
13699 tp
->irq_max
= TG3_IRQ_MAX_VECS
;
13703 if (tg3_flag(tp
, 5755_PLUS
))
13704 tg3_flag_set(tp
, SHORT_DMA_BUG
);
13706 if (tg3_flag(tp
, 5717_PLUS
))
13707 tg3_flag_set(tp
, LRG_PROD_RING_CAP
);
13709 if (tg3_flag(tp
, 57765_PLUS
) &&
13710 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5719
)
13711 tg3_flag_set(tp
, USE_JUMBO_BDFLAG
);
13713 if (!tg3_flag(tp
, 5705_PLUS
) ||
13714 tg3_flag(tp
, 5780_CLASS
) ||
13715 tg3_flag(tp
, USE_JUMBO_BDFLAG
))
13716 tg3_flag_set(tp
, JUMBO_CAPABLE
);
13718 pci_read_config_dword(tp
->pdev
, TG3PCI_PCISTATE
,
13721 tp
->pcie_cap
= pci_find_capability(tp
->pdev
, PCI_CAP_ID_EXP
);
13722 if (tp
->pcie_cap
!= 0) {
13725 tg3_flag_set(tp
, PCI_EXPRESS
);
13727 tp
->pcie_readrq
= 4096;
13728 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
||
13729 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
)
13730 tp
->pcie_readrq
= 2048;
13732 pcie_set_readrq(tp
->pdev
, tp
->pcie_readrq
);
13734 pci_read_config_word(tp
->pdev
,
13735 tp
->pcie_cap
+ PCI_EXP_LNKCTL
,
13737 if (lnkctl
& PCI_EXP_LNKCTL_CLKREQ_EN
) {
13738 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
13740 tg3_flag_clear(tp
, HW_TSO_2
);
13741 tg3_flag_clear(tp
, TSO_CAPABLE
);
13743 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
||
13744 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
||
13745 tp
->pci_chip_rev_id
== CHIPREV_ID_57780_A0
||
13746 tp
->pci_chip_rev_id
== CHIPREV_ID_57780_A1
)
13747 tg3_flag_set(tp
, CLKREQ_BUG
);
13748 } else if (tp
->pci_chip_rev_id
== CHIPREV_ID_5717_A0
) {
13749 tg3_flag_set(tp
, L1PLLPD_EN
);
13751 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
) {
13752 tg3_flag_set(tp
, PCI_EXPRESS
);
13753 } else if (!tg3_flag(tp
, 5705_PLUS
) ||
13754 tg3_flag(tp
, 5780_CLASS
)) {
13755 tp
->pcix_cap
= pci_find_capability(tp
->pdev
, PCI_CAP_ID_PCIX
);
13756 if (!tp
->pcix_cap
) {
13757 dev_err(&tp
->pdev
->dev
,
13758 "Cannot find PCI-X capability, aborting\n");
13762 if (!(pci_state_reg
& PCISTATE_CONV_PCI_MODE
))
13763 tg3_flag_set(tp
, PCIX_MODE
);
	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles. We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
	if (pci_dev_present(tg3_write_reorder_chipsets) &&
	    !tg3_flag(tp, PCI_EXPRESS))
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
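
	/* A minimal sketch of the read-back flush that MBOX_WRITE_REORDER
	 * enables (tg3_write_flush_reg32() in this file follows the same
	 * pattern): the dummy read forces the posted mailbox write out of a
	 * reordering host bridge before the CPU continues.
	 *
	 *	writel(val, mbox_addr);
	 *	readl(mbox_addr);	<-- read back to flush the write
	 */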
	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
			     &tp->pci_cacheline_sz);
	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
			     &tp->pci_lat_timer);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to workaround a bug.
		 */
		tg3_flag_set(tp, TXD_MBOX_HWBUG);
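
		/* A minimal sketch of what TXD_MBOX_HWBUG means for the fast
		 * path (tg3_write32_tx_mbox() in this file implements it
		 * along these lines): the producer index is simply written
		 * twice, optionally followed by a flushing read, so the chip
		 * is guaranteed to latch the final value.
		 *
		 *	writel(val, mbox);
		 *	writel(val, mbox);
		 *	readl(mbox);
		 */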
13793 /* If we are in PCI-X mode, enable register write workaround.
13795 * The workaround is to use indirect register accesses
13796 * for all chip writes not to mailbox registers.
13798 if (tg3_flag(tp
, PCIX_MODE
)) {
13801 tg3_flag_set(tp
, PCIX_TARGET_HWBUG
);
13803 /* The chip can have it's power management PCI config
13804 * space registers clobbered due to this bug.
13805 * So explicitly force the chip into D0 here.
13807 pci_read_config_dword(tp
->pdev
,
13808 tp
->pm_cap
+ PCI_PM_CTRL
,
13810 pm_reg
&= ~PCI_PM_CTRL_STATE_MASK
;
13811 pm_reg
|= PCI_PM_CTRL_PME_ENABLE
| 0 /* D0 */;
13812 pci_write_config_dword(tp
->pdev
,
13813 tp
->pm_cap
+ PCI_PM_CTRL
,
13816 /* Also, force SERR#/PERR# in PCI command. */
13817 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
13818 pci_cmd
|= PCI_COMMAND_PARITY
| PCI_COMMAND_SERR
;
13819 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
13823 if ((pci_state_reg
& PCISTATE_BUS_SPEED_HIGH
) != 0)
13824 tg3_flag_set(tp
, PCI_HIGH_SPEED
);
13825 if ((pci_state_reg
& PCISTATE_BUS_32BIT
) != 0)
13826 tg3_flag_set(tp
, PCI_32BIT
);
13828 /* Chip-specific fixup from Broadcom driver */
13829 if ((tp
->pci_chip_rev_id
== CHIPREV_ID_5704_A0
) &&
13830 (!(pci_state_reg
& PCISTATE_RETRY_SAME_DMA
))) {
13831 pci_state_reg
|= PCISTATE_RETRY_SAME_DMA
;
13832 pci_write_config_dword(tp
->pdev
, TG3PCI_PCISTATE
, pci_state_reg
);
13835 /* Default fast path register access methods */
13836 tp
->read32
= tg3_read32
;
13837 tp
->write32
= tg3_write32
;
13838 tp
->read32_mbox
= tg3_read32
;
13839 tp
->write32_mbox
= tg3_write32
;
13840 tp
->write32_tx_mbox
= tg3_write32
;
13841 tp
->write32_rx_mbox
= tg3_write32
;
13843 /* Various workaround register access methods */
13844 if (tg3_flag(tp
, PCIX_TARGET_HWBUG
))
13845 tp
->write32
= tg3_write_indirect_reg32
;
13846 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
||
13847 (tg3_flag(tp
, PCI_EXPRESS
) &&
13848 tp
->pci_chip_rev_id
== CHIPREV_ID_5750_A0
)) {
13850 * Back to back register writes can cause problems on these
13851 * chips, the workaround is to read back all reg writes
13852 * except those to mailbox regs.
13854 * See tg3_write_indirect_reg32().
13856 tp
->write32
= tg3_write_flush_reg32
;
13859 if (tg3_flag(tp
, TXD_MBOX_HWBUG
) || tg3_flag(tp
, MBOX_WRITE_REORDER
)) {
13860 tp
->write32_tx_mbox
= tg3_write32_tx_mbox
;
13861 if (tg3_flag(tp
, MBOX_WRITE_REORDER
))
13862 tp
->write32_rx_mbox
= tg3_write_flush_reg32
;
13865 if (tg3_flag(tp
, ICH_WORKAROUND
)) {
13866 tp
->read32
= tg3_read_indirect_reg32
;
13867 tp
->write32
= tg3_write_indirect_reg32
;
13868 tp
->read32_mbox
= tg3_read_indirect_mbox
;
13869 tp
->write32_mbox
= tg3_write_indirect_mbox
;
13870 tp
->write32_tx_mbox
= tg3_write_indirect_mbox
;
13871 tp
->write32_rx_mbox
= tg3_write_indirect_mbox
;
13876 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
13877 pci_cmd
&= ~PCI_COMMAND_MEMORY
;
13878 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
13880 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
13881 tp
->read32_mbox
= tg3_read32_mbox_5906
;
13882 tp
->write32_mbox
= tg3_write32_mbox_5906
;
13883 tp
->write32_tx_mbox
= tg3_write32_mbox_5906
;
13884 tp
->write32_rx_mbox
= tg3_write32_mbox_5906
;
13887 if (tp
->write32
== tg3_write_indirect_reg32
||
13888 (tg3_flag(tp
, PCIX_MODE
) &&
13889 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
13890 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
)))
13891 tg3_flag_set(tp
, SRAM_USE_CONFIG
);
13893 /* Get eeprom hw config before calling tg3_set_power_state().
13894 * In particular, the TG3_FLAG_IS_NIC flag must be
13895 * determined before calling tg3_set_power_state() so that
13896 * we know whether or not to switch out of Vaux power.
13897 * When the flag is set, it means that GPIO1 is used for eeprom
13898 * write protect and also implies that it is a LOM where GPIOs
13899 * are not used to switch power.
13901 tg3_get_eeprom_hw_cfg(tp
);
13903 if (tg3_flag(tp
, ENABLE_APE
)) {
13904 /* Allow reads and writes to the
13905 * APE register and memory space.
13907 pci_state_reg
|= PCISTATE_ALLOW_APE_CTLSPC_WR
|
13908 PCISTATE_ALLOW_APE_SHMEM_WR
|
13909 PCISTATE_ALLOW_APE_PSPACE_WR
;
13910 pci_write_config_dword(tp
->pdev
, TG3PCI_PCISTATE
,
13914 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
||
13915 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
||
13916 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
||
13917 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
||
13918 tg3_flag(tp
, 57765_PLUS
))
13919 tg3_flag_set(tp
, CPMU_PRESENT
);
13921 /* Set up tp->grc_local_ctrl before calling tg3_power_up().
13922 * GPIO1 driven high will bring 5700's external PHY out of reset.
13923 * It is also used as eeprom write protect on LOMs.
13925 tp
->grc_local_ctrl
= GRC_LCLCTRL_INT_ON_ATTN
| GRC_LCLCTRL_AUTO_SEEPROM
;
13926 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
13927 tg3_flag(tp
, EEPROM_WRITE_PROT
))
13928 tp
->grc_local_ctrl
|= (GRC_LCLCTRL_GPIO_OE1
|
13929 GRC_LCLCTRL_GPIO_OUTPUT1
);
13930 /* Unused GPIO3 must be driven as output on 5752 because there
13931 * are no pull-up resistors on unused GPIO pins.
13933 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
)
13934 tp
->grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OE3
;
13936 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
||
13937 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
||
13938 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57765
)
13939 tp
->grc_local_ctrl
|= GRC_LCLCTRL_GPIO_UART_SEL
;
13941 if (tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5761
||
13942 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5761S
) {
13943 /* Turn off the debug UART. */
13944 tp
->grc_local_ctrl
|= GRC_LCLCTRL_GPIO_UART_SEL
;
13945 if (tg3_flag(tp
, IS_NIC
))
13946 /* Keep VMain power. */
13947 tp
->grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OE0
|
13948 GRC_LCLCTRL_GPIO_OUTPUT0
;
13951 /* Force the chip into D0. */
13952 err
= tg3_power_up(tp
);
13954 dev_err(&tp
->pdev
->dev
, "Transition to D0 failed\n");
13958 /* Derive initial jumbo mode from MTU assigned in
13959 * ether_setup() via the alloc_etherdev() call
13961 if (tp
->dev
->mtu
> ETH_DATA_LEN
&& !tg3_flag(tp
, 5780_CLASS
))
13962 tg3_flag_set(tp
, JUMBO_RING_ENABLE
);
13964 /* Determine WakeOnLan speed to use. */
13965 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
13966 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
||
13967 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B0
||
13968 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B2
) {
13969 tg3_flag_clear(tp
, WOL_SPEED_100MB
);
13971 tg3_flag_set(tp
, WOL_SPEED_100MB
);
13974 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
)
13975 tp
->phy_flags
|= TG3_PHYFLG_IS_FET
;
13977 /* A few boards don't want Ethernet@WireSpeed phy feature */
13978 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
13979 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
&&
13980 (tp
->pci_chip_rev_id
!= CHIPREV_ID_5705_A0
) &&
13981 (tp
->pci_chip_rev_id
!= CHIPREV_ID_5705_A1
)) ||
13982 (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) ||
13983 (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
))
13984 tp
->phy_flags
|= TG3_PHYFLG_NO_ETH_WIRE_SPEED
;
13986 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5703_AX
||
13987 GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5704_AX
)
13988 tp
->phy_flags
|= TG3_PHYFLG_ADC_BUG
;
13989 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5704_A0
)
13990 tp
->phy_flags
|= TG3_PHYFLG_5704_A0_BUG
;
13992 if (tg3_flag(tp
, 5705_PLUS
) &&
13993 !(tp
->phy_flags
& TG3_PHYFLG_IS_FET
) &&
13994 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5785
&&
13995 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_57780
&&
13996 !tg3_flag(tp
, 57765_PLUS
)) {
13997 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
||
13998 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5787
||
13999 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
||
14000 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
) {
14001 if (tp
->pdev
->device
!= PCI_DEVICE_ID_TIGON3_5756
&&
14002 tp
->pdev
->device
!= PCI_DEVICE_ID_TIGON3_5722
)
14003 tp
->phy_flags
|= TG3_PHYFLG_JITTER_BUG
;
14004 if (tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5755M
)
14005 tp
->phy_flags
|= TG3_PHYFLG_ADJUST_TRIM
;
14007 tp
->phy_flags
|= TG3_PHYFLG_BER_BUG
;
14010 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
&&
14011 GET_CHIP_REV(tp
->pci_chip_rev_id
) != CHIPREV_5784_AX
) {
14012 tp
->phy_otp
= tg3_read_otp_phycfg(tp
);
14013 if (tp
->phy_otp
== 0)
14014 tp
->phy_otp
= TG3_OTP_DEFAULT
;
14017 if (tg3_flag(tp
, CPMU_PRESENT
))
14018 tp
->mi_mode
= MAC_MI_MODE_500KHZ_CONST
;
14020 tp
->mi_mode
= MAC_MI_MODE_BASE
;
14022 tp
->coalesce_mode
= 0;
14023 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) != CHIPREV_5700_AX
&&
14024 GET_CHIP_REV(tp
->pci_chip_rev_id
) != CHIPREV_5700_BX
)
14025 tp
->coalesce_mode
|= HOSTCC_MODE_32BYTE
;
14027 /* Set these bits to enable statistics workaround. */
14028 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5717
||
14029 tp
->pci_chip_rev_id
== CHIPREV_ID_5719_A0
||
14030 tp
->pci_chip_rev_id
== CHIPREV_ID_5720_A0
) {
14031 tp
->coalesce_mode
|= HOSTCC_MODE_ATTN
;
14032 tp
->grc_mode
|= GRC_MODE_IRQ_ON_FLOW_ATTN
;
14035 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
||
14036 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
)
14037 tg3_flag_set(tp
, USE_PHYLIB
);
14039 err
= tg3_mdio_init(tp
);
14043 /* Initialize data/descriptor byte/word swapping. */
14044 val
= tr32(GRC_MODE
);
14045 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
)
14046 val
&= (GRC_MODE_BYTE_SWAP_B2HRX_DATA
|
14047 GRC_MODE_WORD_SWAP_B2HRX_DATA
|
14048 GRC_MODE_B2HRX_ENABLE
|
14049 GRC_MODE_HTX2B_ENABLE
|
14050 GRC_MODE_HOST_STACKUP
);
14052 val
&= GRC_MODE_HOST_STACKUP
;
14054 tw32(GRC_MODE
, val
| tp
->grc_mode
);
14056 tg3_switch_clocks(tp
);
14058 /* Clear this out for sanity. */
14059 tw32(TG3PCI_MEM_WIN_BASE_ADDR
, 0);
14061 pci_read_config_dword(tp
->pdev
, TG3PCI_PCISTATE
,
14063 if ((pci_state_reg
& PCISTATE_CONV_PCI_MODE
) == 0 &&
14064 !tg3_flag(tp
, PCIX_TARGET_HWBUG
)) {
14065 u32 chiprevid
= GET_CHIP_REV_ID(tp
->misc_host_ctrl
);
14067 if (chiprevid
== CHIPREV_ID_5701_A0
||
14068 chiprevid
== CHIPREV_ID_5701_B0
||
14069 chiprevid
== CHIPREV_ID_5701_B2
||
14070 chiprevid
== CHIPREV_ID_5701_B5
) {
14071 void __iomem
*sram_base
;
14073 /* Write some dummy words into the SRAM status block
14074 * area, see if it reads back correctly. If the return
14075 * value is bad, force enable the PCIX workaround.
14077 sram_base
= tp
->regs
+ NIC_SRAM_WIN_BASE
+ NIC_SRAM_STATS_BLK
;
14079 writel(0x00000000, sram_base
);
14080 writel(0x00000000, sram_base
+ 4);
14081 writel(0xffffffff, sram_base
+ 4);
14082 if (readl(sram_base
) != 0x00000000)
14083 tg3_flag_set(tp
, PCIX_TARGET_HWBUG
);
14088 tg3_nvram_init(tp
);
14090 grc_misc_cfg
= tr32(GRC_MISC_CFG
);
14091 grc_misc_cfg
&= GRC_MISC_CFG_BOARD_ID_MASK
;
14093 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
&&
14094 (grc_misc_cfg
== GRC_MISC_CFG_BOARD_ID_5788
||
14095 grc_misc_cfg
== GRC_MISC_CFG_BOARD_ID_5788M
))
14096 tg3_flag_set(tp
, IS_5788
);
14098 if (!tg3_flag(tp
, IS_5788
) &&
14099 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
)
14100 tg3_flag_set(tp
, TAGGED_STATUS
);
14101 if (tg3_flag(tp
, TAGGED_STATUS
)) {
14102 tp
->coalesce_mode
|= (HOSTCC_MODE_CLRTICK_RXBD
|
14103 HOSTCC_MODE_CLRTICK_TXBD
);
14105 tp
->misc_host_ctrl
|= MISC_HOST_CTRL_TAGGED_STATUS
;
14106 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
14107 tp
->misc_host_ctrl
);
14110 /* Preserve the APE MAC_MODE bits */
14111 if (tg3_flag(tp
, ENABLE_APE
))
14112 tp
->mac_mode
= MAC_MODE_APE_TX_EN
| MAC_MODE_APE_RX_EN
;
14114 tp
->mac_mode
= TG3_DEF_MAC_MODE
;
14116 /* these are limited to 10/100 only */
14117 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
&&
14118 (grc_misc_cfg
== 0x8000 || grc_misc_cfg
== 0x4000)) ||
14119 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
&&
14120 tp
->pdev
->vendor
== PCI_VENDOR_ID_BROADCOM
&&
14121 (tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5901
||
14122 tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5901_2
||
14123 tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5705F
)) ||
14124 (tp
->pdev
->vendor
== PCI_VENDOR_ID_BROADCOM
&&
14125 (tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5751F
||
14126 tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5753F
||
14127 tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5787F
)) ||
14128 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57790
||
14129 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57791
||
14130 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57795
||
14131 (tp
->phy_flags
& TG3_PHYFLG_IS_FET
))
14132 tp
->phy_flags
|= TG3_PHYFLG_10_100_ONLY
;
14134 err
= tg3_phy_probe(tp
);
14136 dev_err(&tp
->pdev
->dev
, "phy probe failed, err %d\n", err
);
14137 /* ... but do not return immediately ... */
14142 tg3_read_fw_ver(tp
);
14144 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) {
14145 tp
->phy_flags
&= ~TG3_PHYFLG_USE_MI_INTERRUPT
;
14147 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
)
14148 tp
->phy_flags
|= TG3_PHYFLG_USE_MI_INTERRUPT
;
14150 tp
->phy_flags
&= ~TG3_PHYFLG_USE_MI_INTERRUPT
;
14153 /* 5700 {AX,BX} chips have a broken status block link
14154 * change bit implementation, so we must use the
14155 * status register in those cases.
14157 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
)
14158 tg3_flag_set(tp
, USE_LINKCHG_REG
);
14160 tg3_flag_clear(tp
, USE_LINKCHG_REG
);
14162 /* The led_ctrl is set during tg3_phy_probe, here we might
14163 * have to force the link status polling mechanism based
14164 * upon subsystem IDs.
14166 if (tp
->pdev
->subsystem_vendor
== PCI_VENDOR_ID_DELL
&&
14167 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
&&
14168 !(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)) {
14169 tp
->phy_flags
|= TG3_PHYFLG_USE_MI_INTERRUPT
;
14170 tg3_flag_set(tp
, USE_LINKCHG_REG
);
14173 /* For all SERDES we poll the MAC status register. */
14174 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)
14175 tg3_flag_set(tp
, POLL_SERDES
);
14177 tg3_flag_clear(tp
, POLL_SERDES
);
14179 tp
->rx_offset
= NET_IP_ALIGN
;
14180 tp
->rx_copy_thresh
= TG3_RX_COPY_THRESHOLD
;
14181 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
&&
14182 tg3_flag(tp
, PCIX_MODE
)) {
14184 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14185 tp
->rx_copy_thresh
= ~(u16
)0;
14189 tp
->rx_std_ring_mask
= TG3_RX_STD_RING_SIZE(tp
) - 1;
14190 tp
->rx_jmb_ring_mask
= TG3_RX_JMB_RING_SIZE(tp
) - 1;
14191 tp
->rx_ret_ring_mask
= tg3_rx_ret_ring_size(tp
) - 1;
14193 tp
->rx_std_max_post
= tp
->rx_std_ring_mask
+ 1;
14195 /* Increment the rx prod index on the rx std ring by at most
14196 * 8 for these chips to workaround hw errata.
14198 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
||
14199 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
||
14200 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
)
14201 tp
->rx_std_max_post
= 8;
14203 if (tg3_flag(tp
, ASPM_WORKAROUND
))
14204 tp
->pwrmgmt_thresh
= tr32(PCIE_PWR_MGMT_THRESH
) &
14205 PCIE_PWR_MGMT_L1_THRESH_MSK
;
14210 #ifdef CONFIG_SPARC
14211 static int __devinit
tg3_get_macaddr_sparc(struct tg3
*tp
)
14213 struct net_device
*dev
= tp
->dev
;
14214 struct pci_dev
*pdev
= tp
->pdev
;
14215 struct device_node
*dp
= pci_device_to_OF_node(pdev
);
14216 const unsigned char *addr
;
14219 addr
= of_get_property(dp
, "local-mac-address", &len
);
14220 if (addr
&& len
== 6) {
14221 memcpy(dev
->dev_addr
, addr
, 6);
14222 memcpy(dev
->perm_addr
, dev
->dev_addr
, 6);
14228 static int __devinit
tg3_get_default_macaddr_sparc(struct tg3
*tp
)
14230 struct net_device
*dev
= tp
->dev
;
14232 memcpy(dev
->dev_addr
, idprom
->id_ethaddr
, 6);
14233 memcpy(dev
->perm_addr
, idprom
->id_ethaddr
, 6);
14238 static int __devinit
tg3_get_device_address(struct tg3
*tp
)
14240 struct net_device
*dev
= tp
->dev
;
14241 u32 hi
, lo
, mac_offset
;
14244 #ifdef CONFIG_SPARC
14245 if (!tg3_get_macaddr_sparc(tp
))
14250 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
||
14251 tg3_flag(tp
, 5780_CLASS
)) {
14252 if (tr32(TG3PCI_DUAL_MAC_CTRL
) & DUAL_MAC_CTRL_ID
)
14254 if (tg3_nvram_lock(tp
))
14255 tw32_f(NVRAM_CMD
, NVRAM_CMD_RESET
);
14257 tg3_nvram_unlock(tp
);
14258 } else if (tg3_flag(tp
, 5717_PLUS
)) {
14259 if (PCI_FUNC(tp
->pdev
->devfn
) & 1)
14261 if (PCI_FUNC(tp
->pdev
->devfn
) > 1)
14262 mac_offset
+= 0x18c;
14263 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
)
14266 /* First try to get it from MAC address mailbox. */
14267 tg3_read_mem(tp
, NIC_SRAM_MAC_ADDR_HIGH_MBOX
, &hi
);
14268 if ((hi
>> 16) == 0x484b) {
14269 dev
->dev_addr
[0] = (hi
>> 8) & 0xff;
14270 dev
->dev_addr
[1] = (hi
>> 0) & 0xff;
14272 tg3_read_mem(tp
, NIC_SRAM_MAC_ADDR_LOW_MBOX
, &lo
);
14273 dev
->dev_addr
[2] = (lo
>> 24) & 0xff;
14274 dev
->dev_addr
[3] = (lo
>> 16) & 0xff;
14275 dev
->dev_addr
[4] = (lo
>> 8) & 0xff;
14276 dev
->dev_addr
[5] = (lo
>> 0) & 0xff;
14278 /* Some old bootcode may report a 0 MAC address in SRAM */
14279 addr_ok
= is_valid_ether_addr(&dev
->dev_addr
[0]);
14282 /* Next, try NVRAM. */
14283 if (!tg3_flag(tp
, NO_NVRAM
) &&
14284 !tg3_nvram_read_be32(tp
, mac_offset
+ 0, &hi
) &&
14285 !tg3_nvram_read_be32(tp
, mac_offset
+ 4, &lo
)) {
14286 memcpy(&dev
->dev_addr
[0], ((char *)&hi
) + 2, 2);
14287 memcpy(&dev
->dev_addr
[2], (char *)&lo
, sizeof(lo
));
14289 /* Finally just fetch it out of the MAC control regs. */
14291 hi
= tr32(MAC_ADDR_0_HIGH
);
14292 lo
= tr32(MAC_ADDR_0_LOW
);
14294 dev
->dev_addr
[5] = lo
& 0xff;
14295 dev
->dev_addr
[4] = (lo
>> 8) & 0xff;
14296 dev
->dev_addr
[3] = (lo
>> 16) & 0xff;
14297 dev
->dev_addr
[2] = (lo
>> 24) & 0xff;
14298 dev
->dev_addr
[1] = hi
& 0xff;
14299 dev
->dev_addr
[0] = (hi
>> 8) & 0xff;
14303 if (!is_valid_ether_addr(&dev
->dev_addr
[0])) {
14304 #ifdef CONFIG_SPARC
14305 if (!tg3_get_default_macaddr_sparc(tp
))
14310 memcpy(dev
->perm_addr
, dev
->dev_addr
, dev
->addr_len
);
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;
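
	/* Worked example of the conversion above: PCI_CACHE_LINE_SIZE counts
	 * 32-bit words, so a config register value of 0x10 means
	 *
	 *	cacheline_size = 0x10 * 4 = 64 bytes
	 *
	 * and a value of 0 (size unknown) falls back to the 1024-byte case.
	 */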
	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;
	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects. We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
14372 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
14373 val
|= (DMA_RWCTRL_READ_BNDRY_128_PCIX
|
14374 DMA_RWCTRL_WRITE_BNDRY_128_PCIX
);
14376 val
|= (DMA_RWCTRL_READ_BNDRY_384_PCIX
|
14377 DMA_RWCTRL_WRITE_BNDRY_384_PCIX
);
14382 val
|= (DMA_RWCTRL_READ_BNDRY_256_PCIX
|
14383 DMA_RWCTRL_WRITE_BNDRY_256_PCIX
);
14387 val
|= (DMA_RWCTRL_READ_BNDRY_384_PCIX
|
14388 DMA_RWCTRL_WRITE_BNDRY_384_PCIX
);
14391 } else if (tg3_flag(tp
, PCI_EXPRESS
)) {
14392 switch (cacheline_size
) {
14396 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
14397 val
&= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE
;
14398 val
|= DMA_RWCTRL_WRITE_BNDRY_64_PCIE
;
14404 val
&= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE
;
14405 val
|= DMA_RWCTRL_WRITE_BNDRY_128_PCIE
;
14409 switch (cacheline_size
) {
14411 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
14412 val
|= (DMA_RWCTRL_READ_BNDRY_16
|
14413 DMA_RWCTRL_WRITE_BNDRY_16
);
14418 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
14419 val
|= (DMA_RWCTRL_READ_BNDRY_32
|
14420 DMA_RWCTRL_WRITE_BNDRY_32
);
14425 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
14426 val
|= (DMA_RWCTRL_READ_BNDRY_64
|
14427 DMA_RWCTRL_WRITE_BNDRY_64
);
14432 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
14433 val
|= (DMA_RWCTRL_READ_BNDRY_128
|
14434 DMA_RWCTRL_WRITE_BNDRY_128
);
14439 val
|= (DMA_RWCTRL_READ_BNDRY_256
|
14440 DMA_RWCTRL_WRITE_BNDRY_256
);
14443 val
|= (DMA_RWCTRL_READ_BNDRY_512
|
14444 DMA_RWCTRL_WRITE_BNDRY_512
);
14448 val
|= (DMA_RWCTRL_READ_BNDRY_1024
|
14449 DMA_RWCTRL_WRITE_BNDRY_1024
);
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf,
				     dma_addr_t buf_dma, int size,
				     int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
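
	/* A minimal sketch of the config-space memory window used above to
	 * drop the descriptor into NIC SRAM: point the window at the target
	 * SRAM address, write one 32-bit word through it, then park the
	 * window back at zero so normal decoding is unaffected.
	 *
	 *	pci_write_config_dword(pdev, TG3PCI_MEM_WIN_BASE_ADDR, addr);
	 *	pci_write_config_dword(pdev, TG3PCI_MEM_WIN_DATA, word);
	 *	pci_write_config_dword(pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	 */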
	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
14538 #define TEST_BUFFER_SIZE 0x2000
14540 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets
) = {
14541 { PCI_DEVICE(PCI_VENDOR_ID_APPLE
, PCI_DEVICE_ID_APPLE_UNI_N_PCI15
) },
14545 static int __devinit
tg3_test_dma(struct tg3
*tp
)
14547 dma_addr_t buf_dma
;
14548 u32
*buf
, saved_dma_rwctrl
;
14551 buf
= dma_alloc_coherent(&tp
->pdev
->dev
, TEST_BUFFER_SIZE
,
14552 &buf_dma
, GFP_KERNEL
);
14558 tp
->dma_rwctrl
= ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT
) |
14559 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT
));
14561 tp
->dma_rwctrl
= tg3_calc_dma_bndry(tp
, tp
->dma_rwctrl
);
14563 if (tg3_flag(tp
, 57765_PLUS
))
14566 if (tg3_flag(tp
, PCI_EXPRESS
)) {
14567 /* DMA read watermark not used on PCIE */
14568 tp
->dma_rwctrl
|= 0x00180000;
14569 } else if (!tg3_flag(tp
, PCIX_MODE
)) {
14570 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
||
14571 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
)
14572 tp
->dma_rwctrl
|= 0x003f0000;
14574 tp
->dma_rwctrl
|= 0x003f000f;
14576 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
14577 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) {
14578 u32 ccval
= (tr32(TG3PCI_CLOCK_CTRL
) & 0x1f);
14579 u32 read_water
= 0x7;
14581 /* If the 5704 is behind the EPB bridge, we can
14582 * do the less restrictive ONE_DMA workaround for
14583 * better performance.
14585 if (tg3_flag(tp
, 40BIT_DMA_BUG
) &&
14586 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
)
14587 tp
->dma_rwctrl
|= 0x8000;
14588 else if (ccval
== 0x6 || ccval
== 0x7)
14589 tp
->dma_rwctrl
|= DMA_RWCTRL_ONE_DMA
;
14591 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
)
14593 /* Set bit 23 to enable PCIX hw bug fix */
14595 (read_water
<< DMA_RWCTRL_READ_WATER_SHIFT
) |
14596 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT
) |
14598 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5780
) {
14599 /* 5780 always in PCIX mode */
14600 tp
->dma_rwctrl
|= 0x00144000;
14601 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
) {
14602 /* 5714 always in PCIX mode */
14603 tp
->dma_rwctrl
|= 0x00148000;
14605 tp
->dma_rwctrl
|= 0x001b000f;
14609 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
14610 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
)
14611 tp
->dma_rwctrl
&= 0xfffffff0;
14613 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
14614 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
14615 /* Remove this if it causes problems for some boards. */
14616 tp
->dma_rwctrl
|= DMA_RWCTRL_USE_MEM_READ_MULT
;
14618 /* On 5700/5701 chips, we need to set this bit.
14619 * Otherwise the chip will issue cacheline transactions
14620 * to streamable DMA memory with not all the byte
14621 * enables turned on. This is an error on several
14622 * RISC PCI controllers, in particular sparc64.
14624 * On 5703/5704 chips, this bit has been reassigned
14625 * a different meaning. In particular, it is used
14626 * on those chips to enable a PCI-X workaround.
14628 tp
->dma_rwctrl
|= DMA_RWCTRL_ASSERT_ALL_BE
;
14631 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
14634 /* Unneeded, already done by tg3_get_invariants. */
14635 tg3_switch_clocks(tp
);
14638 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
&&
14639 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
)
14642 /* It is best to perform DMA test with maximum write burst size
14643 * to expose the 5700/5701 write DMA bug.
14645 saved_dma_rwctrl
= tp
->dma_rwctrl
;
14646 tp
->dma_rwctrl
&= ~DMA_RWCTRL_WRITE_BNDRY_MASK
;
14647 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
14652 for (i
= 0; i
< TEST_BUFFER_SIZE
/ sizeof(u32
); i
++)
14655 /* Send the buffer to the chip. */
14656 ret
= tg3_do_test_dma(tp
, buf
, buf_dma
, TEST_BUFFER_SIZE
, 1);
14658 dev_err(&tp
->pdev
->dev
,
14659 "%s: Buffer write failed. err = %d\n",
14665 /* validate data reached card RAM correctly. */
14666 for (i
= 0; i
< TEST_BUFFER_SIZE
/ sizeof(u32
); i
++) {
14668 tg3_read_mem(tp
, 0x2100 + (i
*4), &val
);
14669 if (le32_to_cpu(val
) != p
[i
]) {
14670 dev_err(&tp
->pdev
->dev
,
14671 "%s: Buffer corrupted on device! "
14672 "(%d != %d)\n", __func__
, val
, i
);
14673 /* ret = -ENODEV here? */
14678 /* Now read it back. */
14679 ret
= tg3_do_test_dma(tp
, buf
, buf_dma
, TEST_BUFFER_SIZE
, 0);
14681 dev_err(&tp
->pdev
->dev
, "%s: Buffer read failed. "
14682 "err = %d\n", __func__
, ret
);
14687 for (i
= 0; i
< TEST_BUFFER_SIZE
/ sizeof(u32
); i
++) {
14691 if ((tp
->dma_rwctrl
& DMA_RWCTRL_WRITE_BNDRY_MASK
) !=
14692 DMA_RWCTRL_WRITE_BNDRY_16
) {
14693 tp
->dma_rwctrl
&= ~DMA_RWCTRL_WRITE_BNDRY_MASK
;
14694 tp
->dma_rwctrl
|= DMA_RWCTRL_WRITE_BNDRY_16
;
14695 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
14698 dev_err(&tp
->pdev
->dev
,
14699 "%s: Buffer corrupted on read back! "
14700 "(%d != %d)\n", __func__
, p
[i
], i
);
		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}

	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
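
/* In outline, tg3_test_dma() above is a straightforward write/read-back
 * pattern test, roughly:
 *
 *	fill buf[] with buf[i] = i
 *	tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1)   host -> NIC
 *	verify the pattern landed in NIC SRAM via tg3_read_mem()
 *	tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0)   NIC -> host
 *	compare buf[] against the original pattern
 *
 * A read-back mismatch with the largest write boundary selected is what
 * exposes the 5700/5701 write DMA bug, and the boundary is then clamped to
 * DMA_RWCTRL_WRITE_BNDRY_16.
 */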
14735 static void __devinit
tg3_init_bufmgr_config(struct tg3
*tp
)
14737 if (tg3_flag(tp
, 57765_PLUS
)) {
14738 tp
->bufmgr_config
.mbuf_read_dma_low_water
=
14739 DEFAULT_MB_RDMA_LOW_WATER_5705
;
14740 tp
->bufmgr_config
.mbuf_mac_rx_low_water
=
14741 DEFAULT_MB_MACRX_LOW_WATER_57765
;
14742 tp
->bufmgr_config
.mbuf_high_water
=
14743 DEFAULT_MB_HIGH_WATER_57765
;
14745 tp
->bufmgr_config
.mbuf_read_dma_low_water_jumbo
=
14746 DEFAULT_MB_RDMA_LOW_WATER_5705
;
14747 tp
->bufmgr_config
.mbuf_mac_rx_low_water_jumbo
=
14748 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765
;
14749 tp
->bufmgr_config
.mbuf_high_water_jumbo
=
14750 DEFAULT_MB_HIGH_WATER_JUMBO_57765
;
14751 } else if (tg3_flag(tp
, 5705_PLUS
)) {
14752 tp
->bufmgr_config
.mbuf_read_dma_low_water
=
14753 DEFAULT_MB_RDMA_LOW_WATER_5705
;
14754 tp
->bufmgr_config
.mbuf_mac_rx_low_water
=
14755 DEFAULT_MB_MACRX_LOW_WATER_5705
;
14756 tp
->bufmgr_config
.mbuf_high_water
=
14757 DEFAULT_MB_HIGH_WATER_5705
;
14758 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
14759 tp
->bufmgr_config
.mbuf_mac_rx_low_water
=
14760 DEFAULT_MB_MACRX_LOW_WATER_5906
;
14761 tp
->bufmgr_config
.mbuf_high_water
=
14762 DEFAULT_MB_HIGH_WATER_5906
;
14765 tp
->bufmgr_config
.mbuf_read_dma_low_water_jumbo
=
14766 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780
;
14767 tp
->bufmgr_config
.mbuf_mac_rx_low_water_jumbo
=
14768 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780
;
14769 tp
->bufmgr_config
.mbuf_high_water_jumbo
=
14770 DEFAULT_MB_HIGH_WATER_JUMBO_5780
;
14772 tp
->bufmgr_config
.mbuf_read_dma_low_water
=
14773 DEFAULT_MB_RDMA_LOW_WATER
;
14774 tp
->bufmgr_config
.mbuf_mac_rx_low_water
=
14775 DEFAULT_MB_MACRX_LOW_WATER
;
14776 tp
->bufmgr_config
.mbuf_high_water
=
14777 DEFAULT_MB_HIGH_WATER
;
14779 tp
->bufmgr_config
.mbuf_read_dma_low_water_jumbo
=
14780 DEFAULT_MB_RDMA_LOW_WATER_JUMBO
;
14781 tp
->bufmgr_config
.mbuf_mac_rx_low_water_jumbo
=
14782 DEFAULT_MB_MACRX_LOW_WATER_JUMBO
;
14783 tp
->bufmgr_config
.mbuf_high_water_jumbo
=
14784 DEFAULT_MB_HIGH_WATER_JUMBO
;
14787 tp
->bufmgr_config
.dma_low_water
= DEFAULT_DMA_LOW_WATER
;
14788 tp
->bufmgr_config
.dma_high_water
= DEFAULT_DMA_HIGH_WATER
;
static char * __devinit tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400:	return "5400";
	case TG3_PHY_ID_BCM5401:	return "5401";
	case TG3_PHY_ID_BCM5411:	return "5411";
	case TG3_PHY_ID_BCM5701:	return "5701";
	case TG3_PHY_ID_BCM5703:	return "5703";
	case TG3_PHY_ID_BCM5704:	return "5704";
	case TG3_PHY_ID_BCM5705:	return "5705";
	case TG3_PHY_ID_BCM5750:	return "5750";
	case TG3_PHY_ID_BCM5752:	return "5752";
	case TG3_PHY_ID_BCM5714:	return "5714";
	case TG3_PHY_ID_BCM5780:	return "5780";
	case TG3_PHY_ID_BCM5755:	return "5755";
	case TG3_PHY_ID_BCM5787:	return "5787";
	case TG3_PHY_ID_BCM5784:	return "5784";
	case TG3_PHY_ID_BCM5756:	return "5722/5756";
	case TG3_PHY_ID_BCM5906:	return "5906";
	case TG3_PHY_ID_BCM5761:	return "5761";
	case TG3_PHY_ID_BCM5718C:	return "5718C";
	case TG3_PHY_ID_BCM5718S:	return "5718S";
	case TG3_PHY_ID_BCM57765:	return "57765";
	case TG3_PHY_ID_BCM5719C:	return "5719C";
	case TG3_PHY_ID_BCM5720C:	return "5720C";
	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
	case 0:			return "serdes";
	default:		return "unknown";
	}
}
static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other.
	 */
	pci_dev_put(peer);

	return peer;
}
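
/* A minimal sketch (not part of the driver) of the devfn arithmetic used by
 * tg3_find_peer() above: masking devfn with ~7 keeps the slot bits and
 * clears the function bits, so OR-ing in 0..7 walks every function in the
 * same slot. PCI_DEVFN()/PCI_SLOT() express the same thing with the standard
 * macros; the helper name is hypothetical.
 */
static inline unsigned int example_sibling_devfn(unsigned int devfn,
						 unsigned int func)
{
	/* Equivalent to (devfn & ~7) | (func & 7). */
	return PCI_DEVFN(PCI_SLOT(devfn), func & 7);
}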
static void __devinit tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
14935 static int __devinit
tg3_init_one(struct pci_dev
*pdev
,
14936 const struct pci_device_id
*ent
)
14938 struct net_device
*dev
;
14940 int i
, err
, pm_cap
;
14941 u32 sndmbx
, rcvmbx
, intmbx
;
14943 u64 dma_mask
, persist_dma_mask
;
14946 printk_once(KERN_INFO
"%s\n", version
);
14948 err
= pci_enable_device(pdev
);
14950 dev_err(&pdev
->dev
, "Cannot enable PCI device, aborting\n");
14954 err
= pci_request_regions(pdev
, DRV_MODULE_NAME
);
14956 dev_err(&pdev
->dev
, "Cannot obtain PCI resources, aborting\n");
14957 goto err_out_disable_pdev
;
14960 pci_set_master(pdev
);
14962 /* Find power-management capability. */
14963 pm_cap
= pci_find_capability(pdev
, PCI_CAP_ID_PM
);
14965 dev_err(&pdev
->dev
,
14966 "Cannot find Power Management capability, aborting\n");
14968 goto err_out_free_res
;
14971 dev
= alloc_etherdev_mq(sizeof(*tp
), TG3_IRQ_MAX_VECS
);
14973 dev_err(&pdev
->dev
, "Etherdev alloc failed, aborting\n");
14975 goto err_out_free_res
;
14978 SET_NETDEV_DEV(dev
, &pdev
->dev
);
14980 tp
= netdev_priv(dev
);
14983 tp
->pm_cap
= pm_cap
;
14984 tp
->rx_mode
= TG3_DEF_RX_MODE
;
14985 tp
->tx_mode
= TG3_DEF_TX_MODE
;
14988 tp
->msg_enable
= tg3_debug
;
14990 tp
->msg_enable
= TG3_DEF_MSG_ENABLE
;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif

	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_iounmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_iounmap;
		}
	}

	tg3_init_bufmgr_config(tp);
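
	/* Offload capabilities are accumulated in 'features' below and then
	 * propagated to dev->features (enabled now), dev->vlan_features
	 * (usable on VLAN devices) and dev->hw_features (toggleable via
	 * ethtool).
	 */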
	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow
	 * INT-PHY loopback for the remaining devices.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}
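
	/* Obtain the adapter's factory MAC address; probing is aborted if a
	 * valid address cannot be read.
	 */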
	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_iounmap;
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}

		tg3_ape_lock_init(tp);

		if (tg3_flag(tp, ENABLE_ASF))
			tg3_read_dash_ver(tp);
	}

	/*
	 * Reset the chip in case the UNDI or EFI driver did not shut it
	 * down cleanly.  The DMA self test will enable the WDMAC and we
	 * would otherwise see (spurious) pending DMA on the PCI bus at
	 * that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}
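
	/* Pre-assign the interrupt, RX-return and TX-producer mailbox
	 * registers for every NAPI context the hardware could use; how
	 * many of these contexts actually receive a vector is decided
	 * later, when the interface is opened.
	 */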
	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i < 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}
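
	/* Log a one-time summary of the adapter: part number, chip revision,
	 * bus type, MAC address, attached PHY and offload capabilities.
	 */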
	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tp->pci_chip_rev_id,
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
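
/* PCI remove routine: release the firmware image, stop the reset worker,
 * detach the PHY/MDIO bus when phylib is in use, unregister the
 * net_device and release all PCI resources.
 */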
static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		if (tp->fw)
			release_firmware(tp->fw);

		cancel_work_sync(&tp->reset_task);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
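
/* System suspend/resume hooks.  Suspend quiesces NAPI, disables interrupts,
 * halts the chip and prepares the device power state; resume re-runs the
 * hardware bring-up and restarts the driver timer.  Only compiled in when
 * CONFIG_PM_SLEEP is enabled.
 */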
#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	flush_work_sync(&tp->reset_task);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}

static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
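
/* PCI Advanced Error Reporting (AER) recovery: the PCI core calls
 * tg3_io_error_detected first, then tg3_io_slot_reset after the slot has
 * been reset, and finally tg3_io_resume once traffic may flow again.
 */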
/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);
	tg3_flag_clear(tp, RESTART_TIMER);

	/* Want to make sure that the reset task doesn't run */
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	tg3_flag_clear(tp, RESTART_TIMER);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure)
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return err;
}

/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err) {
		netdev_err(netdev, "Failed to restore register access.\n");
		goto done;
	}

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	rtnl_unlock();

	return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	tg3_full_unlock(tp);
	if (err) {
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

	tg3_phy_start(tp);

done:
	rtnl_unlock();
}
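
/* Glue tables: the AER callbacks above plus the driver's probe/remove and
 * power-management entry points, registered with the PCI core when the
 * module loads.
 */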
static struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.err_handler	= &tg3_err_handler,
	.driver.pm	= TG3_PM_OPS,
};

static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);