/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2013 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>

#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#endif
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)	\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)	\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
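
/* Usage note (illustrative): the token-pasting wrappers above turn, e.g.,
 *	tg3_flag(tp, ENABLE_APE)
 * into _tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags), i.e. an atomic
 * test_bit() on the device's flag bitmap; the set/clear forms work the
 * same way.
 */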
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		135
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"Nov 14, 2013"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)
/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)
#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
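
/* Illustrative: because TG3_TX_RING_SIZE is a power of two, the mask in
 * NEXT_TX() wraps the index without a hardware divide, e.g.
 *	NEXT_TX(511) == 0 and NEXT_TX(5) == 6
 * which is exactly the '% foo' -> '& (foo - 1)' trick described above.
 */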
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};
MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },

	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },

	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_flow_control" },

	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },

	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },

	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },

	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
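
/* Convention implied by the helpers above: tw32() is a plain posted write,
 * tw32_f() flushes the posted write by reading the register back, and
 * tw32_wait_f() additionally waits the requested number of usecs for
 * registers that are unsafe to read back immediately.
 */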
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
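
/* Typical pairing (illustrative), as tg3_ape_event_lock() below does:
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return -EBUSY;
 *	... access APE-shared state ...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 */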
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	/* Nonzero means we timed out without the event being serviced. */
	return i == timeout_us / 10;
}
*tp
, u32
*data
, u32 base_off
,
844 u32 i
, bufoff
, msgoff
, maxlen
, apedata
;
846 if (!tg3_flag(tp
, APE_HAS_NCSI
))
849 apedata
= tg3_ape_read32(tp
, TG3_APE_SEG_SIG
);
850 if (apedata
!= APE_SEG_SIG_MAGIC
)
853 apedata
= tg3_ape_read32(tp
, TG3_APE_FW_STATUS
);
854 if (!(apedata
& APE_FW_STATUS_READY
))
857 bufoff
= tg3_ape_read32(tp
, TG3_APE_SEG_MSG_BUF_OFF
) +
859 msgoff
= bufoff
+ 2 * sizeof(u32
);
860 maxlen
= tg3_ape_read32(tp
, TG3_APE_SEG_MSG_BUF_LEN
);
865 /* Cap xfer sizes to scratchpad limits. */
866 length
= (len
> maxlen
) ? maxlen
: len
;
869 apedata
= tg3_ape_read32(tp
, TG3_APE_FW_STATUS
);
870 if (!(apedata
& APE_FW_STATUS_READY
))
873 /* Wait for up to 1 msec for APE to service previous event. */
874 err
= tg3_ape_event_lock(tp
, 1000);
878 apedata
= APE_EVENT_STATUS_DRIVER_EVNT
|
879 APE_EVENT_STATUS_SCRTCHPD_READ
|
880 APE_EVENT_STATUS_EVENT_PENDING
;
881 tg3_ape_write32(tp
, TG3_APE_EVENT_STATUS
, apedata
);
883 tg3_ape_write32(tp
, bufoff
, base_off
);
884 tg3_ape_write32(tp
, bufoff
+ sizeof(u32
), length
);
886 tg3_ape_unlock(tp
, TG3_APE_LOCK_MEM
);
887 tg3_ape_write32(tp
, TG3_APE_EVENT
, APE_EVENT_1
);
891 if (tg3_ape_wait_for_event(tp
, 30000))
894 for (i
= 0; length
; i
+= 4, length
-= 4) {
895 u32 val
= tg3_ape_read32(tp
, msgoff
+ i
);
896 memcpy(data
, &val
, sizeof(u32
));
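
/* The scratchpad read above follows the APE mailbox handshake: take
 * TG3_APE_LOCK_MEM, post a SCRTCHPD_READ event with the (offset, length)
 * pair in the message buffer, drop the lock, ring APE_EVENT_1, then wait
 * for EVENT_PENDING to clear before copying the data out of the buffer.
 */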
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
				APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
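
/* Mailbox IRQ convention (from the two functions above): writing 0x00000001
 * to a vector's int_mbox masks it, while writing last_tag << 24 unmasks it
 * and acknowledges status-block work up to that tag.
 */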
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
/* tg3_int_reenable()
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
#define PHY_BUSY_LOOPS	5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
*tp
, int reg
, u32
*val
)
1162 return __tg3_readphy(tp
, tp
->phy_addr
, reg
, val
);
static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}
static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}
static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}
static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}
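
/* Shadow-register convention (from the helpers above): MII_TG3_AUX_CTRL
 * multiplexes several shadow banks; reads select a bank via the RDSEL
 * field, and writes to the MISC bank must set MII_TG3_AUXCTL_MISC_WREN.
 */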
static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}
static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}
static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[tp->phy_addr];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[tp->phy_addr];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
1619 /* tp->lock is held. */
1620 static inline void tg3_generate_fw_event(struct tg3
*tp
)
1624 val
= tr32(GRC_RX_CPU_EVENT
);
1625 val
|= GRC_RX_CPU_DRIVER_EVENT
;
1626 tw32_f(GRC_RX_CPU_EVENT
, val
);
1628 tp
->last_event_jiffies
= jiffies
;
#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}
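
/* Note: delay_cnt counts 8-usec polls (hence the >> 3, rounded up), so the
 * loop above waits at most about TG3_FW_EVENT_TIMEOUT_USEC before giving up.
 */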
1663 /* tp->lock is held. */
1664 static void tg3_phy_gather_ump_data(struct tg3
*tp
, u32
*data
)
1669 if (!tg3_readphy(tp
, MII_BMCR
, ®
))
1671 if (!tg3_readphy(tp
, MII_BMSR
, ®
))
1672 val
|= (reg
& 0xffff);
1676 if (!tg3_readphy(tp
, MII_ADVERTISE
, ®
))
1678 if (!tg3_readphy(tp
, MII_LPA
, ®
))
1679 val
|= (reg
& 0xffff);
1683 if (!(tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)) {
1684 if (!tg3_readphy(tp
, MII_CTRL1000
, ®
))
1686 if (!tg3_readphy(tp
, MII_STAT1000
, ®
))
1687 val
|= (reg
& 0xffff);
1691 if (!tg3_readphy(tp
, MII_PHYADDR
, ®
))
1698 /* tp->lock is held. */
1699 static void tg3_ump_link_report(struct tg3
*tp
)
1703 if (!tg3_flag(tp
, 5780_CLASS
) || !tg3_flag(tp
, ENABLE_ASF
))
1706 tg3_phy_gather_ump_data(tp
, data
);
1708 tg3_wait_for_event_ack(tp
);
1710 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_MBOX
, FWCMD_NICDRV_LINK_UPDATE
);
1711 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_LEN_MBOX
, 14);
1712 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 0x0, data
[0]);
1713 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 0x4, data
[1]);
1714 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 0x8, data
[2]);
1715 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 0xc, data
[3]);
1717 tg3_generate_fw_event(tp
);
1720 /* tp->lock is held. */
1721 static void tg3_stop_fw(struct tg3
*tp
)
1723 if (tg3_flag(tp
, ENABLE_ASF
) && !tg3_flag(tp
, ENABLE_APE
)) {
1724 /* Wait for RX cpu to ACK the previous event. */
1725 tg3_wait_for_event_ack(tp
);
1727 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_MBOX
, FWCMD_NICDRV_PAUSE_FW
);
1729 tg3_generate_fw_event(tp
);
1731 /* Wait for RX cpu to ACK this event. */
1732 tg3_wait_for_event_ack(tp
);
1736 /* tp->lock is held. */
1737 static void tg3_write_sig_pre_reset(struct tg3
*tp
, int kind
)
1739 tg3_write_mem(tp
, NIC_SRAM_FIRMWARE_MBOX
,
1740 NIC_SRAM_FIRMWARE_MBOX_MAGIC1
);
1742 if (tg3_flag(tp
, ASF_NEW_HANDSHAKE
)) {
1744 case RESET_KIND_INIT
:
1745 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1749 case RESET_KIND_SHUTDOWN
:
1750 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1754 case RESET_KIND_SUSPEND
:
1755 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1765 /* tp->lock is held. */
1766 static void tg3_write_sig_post_reset(struct tg3
*tp
, int kind
)
1768 if (tg3_flag(tp
, ASF_NEW_HANDSHAKE
)) {
1770 case RESET_KIND_INIT
:
1771 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1772 DRV_STATE_START_DONE
);
1775 case RESET_KIND_SHUTDOWN
:
1776 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1777 DRV_STATE_UNLOAD_DONE
);
1786 /* tp->lock is held. */
1787 static void tg3_write_sig_legacy(struct tg3
*tp
, int kind
)
1789 if (tg3_flag(tp
, ENABLE_ASF
)) {
1791 case RESET_KIND_INIT
:
1792 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1796 case RESET_KIND_SHUTDOWN
:
1797 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1801 case RESET_KIND_SUSPEND
:
1802 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			if (pci_channel_offline(tp->pdev))
				return -ENODEV;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		if (pci_channel_offline(tp->pdev)) {
			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
				tg3_flag_set(tp, NO_FWARE_REPORTED);
				netdev_info(tp->dev, "No firmware running\n");
			}

			break;
		}

		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}

	tp->link_up = netif_carrier_ok(tp->dev);
}
static u32 tg3_decode_flowctrl_1000T(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_PAUSE_CAP) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_PAUSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_PAUSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}
static u32 tg3_decode_flowctrl_1000X(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_1000XPAUSE) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_1000XPSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_1000XPSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[tp->phy_addr]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
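
/* Pause resolution example (per tg3_resolve_flowctrl_1000X() above): if both
 * ends advertise symmetric PAUSE, the link runs FLOW_CTRL_TX | FLOW_CTRL_RX;
 * an asymmetric-only match enables flow control in one direction only.
 */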
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[tp->phy_addr];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 tg3_asic_rev(tp) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[tp->phy_addr];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
			     tg3_adjust_link, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[tp->phy_addr];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
		phydev->autoneg = tp->link_config.autoneg;
		phydev->advertising = tp->link_config.advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[tp->phy_addr]);
}
static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}
static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}
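
/* FET-class PHYs gate their shadow registers behind MII_TG3_FET_TEST: the
 * sequence above reads TEST, sets MII_TG3_FET_SHADOW_EN, read-modify-writes
 * the shadow register, then restores the original TEST value.  The same
 * enable/modify/restore pattern is used by tg3_phy_toggle_automdix() and
 * tg3_power_down_phy() below.
 */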
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);

	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
}
static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	tg3_phy_toggle_auxctl_smdsp(tp, false);
}
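
/* Each write above isolates one calibration field with an
 * (otp & MASK) >> SHIFT pair and merges in a default before programming the
 * DSP register.  For illustration only (hypothetical values, not the real
 * tg3.h masks): with MASK = 0x00ff0000 and SHIFT = 16, an OTP word of
 * 0x00a50000 would contribute 0x00a5 to the register value.
 */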
static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
{
	u32 val;
	struct ethtool_eee *dest = &tp->eee;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	if (eee)
		dest = eee;

	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
		return;

	/* Pull eee_active */
	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
		dest->eee_active = 1;
	else
		dest->eee_active = 0;

	/* Pull lp advertised settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
		return;
	dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Pull advertised and eee_enabled settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
		return;
	dest->eee_enabled = !!val;
	dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Pull tx_lpi_enabled */
	val = tr32(TG3_CPMU_EEE_MODE);
	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);

	/* Pull lpi timer value */
	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
}
static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_eee_pull_config(tp, NULL);
		if (tp->eee.eee_active)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		if (current_link_up &&
		    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_flag(tp, 57765_CLASS)) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_CTRL1000,
			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);

		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
		if (err)
			return err;

		/* Block the PHY control access.  */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	tg3_phy_toggle_auxctl_smdsp(tp, false);

	tg3_writephy(tp, MII_CTRL1000, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
static void tg3_carrier_off(struct tg3 *tp)
{
	netif_carrier_off(tp->dev);
	tp->link_up = false;
}
static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF))
		netdev_warn(tp->dev,
			    "Management side-band traffic will be interrupted during phy settings change\n");
}
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && tp->link_up) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that
	 * support jumbo frames.
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					     val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
		tg3_phydsp_write(tp, 0xffb, 0x4000);

	tg3_phy_toggle_automdix(tp, true);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
#define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
#define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
#define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
					  TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 12))
static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
{
	u32 status, shift;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719)
		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
	else
		status = tr32(TG3_CPMU_DRV_STATUS);

	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
	status &= ~(TG3_GPIO_MSG_MASK << shift);
	status |= (newstat << shift);

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719)
		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
	else
		tw32(TG3_CPMU_DRV_STATUS, status);

	return status >> TG3_APE_GPIO_MSG_SHIFT;
}
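
/* Each PCI function owns one 4-bit nibble of the status word, starting at
 * TG3_APE_GPIO_MSG_SHIFT.  For example, with pci_fn == 2 the shift is
 * TG3_APE_GPIO_MSG_SHIFT + 8, so writing TG3_GPIO_MSG_NEED_VAUX (0x2)
 * replaces only that function's nibble and leaves the nibbles of
 * functions 0, 1 and 3 untouched.
 */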
static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return 0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
			return -EIO;

		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);

		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
	} else {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	}

	return 0;
}
static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
{
	u32 grc_local_ctrl;

	if (!tg3_flag(tp, IS_NIC) ||
	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701)
		return;

	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);
}
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
{
	u32 msg = 0;

	/* Serialize power state transitions */
	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
		return;

	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
		msg = TG3_GPIO_MSG_NEED_VAUX;

	msg = tg3_set_function_status(tp, msg);

	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
		goto done;

	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);

done:
	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
}
static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}
static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
		return 1;
	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
		if (speed != SPEED_10)
			return 1;
	} else if (speed == SPEED_10)
		return 1;

	return 0;
}
static bool tg3_phy_power_bug(struct tg3 *tp)
{
	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5700:
	case ASIC_REV_5704:
		return true;
	case ASIC_REV_5780:
		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			return true;
		return false;
	case ASIC_REV_5717:
		if (!tp->pci_fn)
			return true;
		return false;
	case ASIC_REV_5719:
	case ASIC_REV_5720:
		if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
		    !tp->pci_fn)
			return true;
		return false;
	}

	return false;
}
static bool tg3_phy_led_bug(struct tg3 *tp)
{
	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5719:
	case ASIC_REV_5720:
		if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
		    !tp->pci_fn)
			return true;
		return false;
	}

	return false;
}
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
		return;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		if (!tg3_phy_led_bug(tp))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (tg3_phy_power_bug(tp))
		return;

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
/* tp->lock is held. */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}
/* tp->lock is held. */
static void tg3_nvram_unlock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		if (tp->nvram_lock_cnt > 0)
			tp->nvram_lock_cnt--;
		if (tp->nvram_lock_cnt == 0)
			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
	}
}
/* tp->lock is held. */
static void tg3_enable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
	}
}
/* tp->lock is held. */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
	}
}
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
#define NVRAM_CMD_TIMEOUT 10000

static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		usleep_range(10, 40);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}
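
/* Callers compose nvram_cmd from the NVRAM_CMD_* flags.  A single-word
 * read, as issued by tg3_nvram_read() below, looks like:
 *
 *	NVRAM_CMD_RD | NVRAM_CMD_GO |
 *	NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE
 *
 * FIRST and LAST bracket a burst; for a one-word transfer both are set.
 */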
static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr / tp->nvram_pagesize) <<
			ATMEL_AT45DB0X1B_PAGE_POS) +
		       (addr % tp->nvram_pagesize);

	return addr;
}
static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
			tp->nvram_pagesize) +
		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));

	return addr;
}
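
/* Worked example of the AT45DB0X1B translation above (assuming the 264-byte
 * page size and page position of 9 that the driver configures for these
 * parts elsewhere): linear offset 530 is page 2, byte 2, so
 * tg3_nvram_phys_addr() returns (2 << 9) + 2 = 0x402, and
 * tg3_nvram_logical_addr() maps 0x402 back to (2 * 264) + 2 = 530.
 */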
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
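
/* Example of the NOTE above: NVRAM bytes 0x12 0x34 0x56 0x78 read back
 * through tg3_nvram_read() as 0x12345678 on a big-endian host but as
 * 0x78563412 on a little-endian host.  tg3_nvram_read_be32() below applies
 * cpu_to_be32() so callers always see bytestream order.
 */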
/* Ensures NVRAM data is in bytestream format. */
static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
{
	u32 v;
	int res = tg3_nvram_read(tp, offset, &v);
	if (!res)
		*val = cpu_to_be32(v);
	return res;
}
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
/* offset and length are dword aligned */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		phy_addr = offset & ~pagemask;

		for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));
			if (ret)
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));

			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
			if (ret)
				break;
		}
		if (ret)
			break;
	}

	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
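
/* Worked example of the cycle above: a 4-byte write at offset 0x106 on a
 * part with 256-byte pages reads the whole page at 0x100 into tmp, patches
 * bytes 6..9, erases the page, then rewrites all 256 bytes with
 * NVRAM_CMD_FIRST on the first word and NVRAM_CMD_LAST on the final one.
 */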
/* offset and length are dword aligned */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
		    !tg3_flag(tp, FLASH) ||
		    !tg3_flag(tp, 57765_PLUS))
			tw32(NVRAM_ADDR, phy_addr);

		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			u32 cmd;

			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
			ret = tg3_nvram_exec_cmd(tp, cmd);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}
/* offset and length are dword aligned */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!tg3_flag(tp, NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	} else {
		u32 grc_mode;

		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH))
			ret = tg3_nvram_write_block_buffered(tp, offset, len,
							     buf);
		else
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
							       buf);

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}
#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000

/* tp->lock is held. */
static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
{
	int i;
	const int iters = 10000;

	for (i = 0; i < iters; i++) {
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
			break;
		if (pci_channel_offline(tp->pdev))
			return -EBUSY;
	}

	return (i == iters) ? -EBUSY : 0;
}
/* tp->lock is held. */
static int tg3_rxcpu_pause(struct tg3 *tp)
{
	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);

	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
	udelay(10);

	return rc;
}
/* tp->lock is held. */
static int tg3_txcpu_pause(struct tg3 *tp)
{
	return tg3_pause_cpu(tp, TX_CPU_BASE);
}
/* tp->lock is held. */
static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
{
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
}
/* tp->lock is held. */
static void tg3_rxcpu_resume(struct tg3 *tp)
{
	tg3_resume_cpu(tp, RX_CPU_BASE);
}
/* tp->lock is held. */
static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
{
	int rc;

	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (cpu_base == RX_CPU_BASE) {
		rc = tg3_rxcpu_pause(tp);
	} else {
		/*
		 * There is only an Rx CPU for the 5750 derivative in the
		 * BCM4785.
		 */
		if (tg3_flag(tp, IS_SSB_CORE))
			return 0;

		rc = tg3_txcpu_pause(tp);
	}

	if (rc) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
static int tg3_fw_data_len(struct tg3 *tp,
			   const struct tg3_firmware_hdr *fw_hdr)
{
	int fw_len;

	/* Non fragmented firmware have one firmware header followed by a
	 * contiguous chunk of data to be written.  The length field in that
	 * header is not the length of data to be written but the complete
	 * length of the bss.  The data length is determined based on
	 * tp->fw->size minus headers.
	 *
	 * Fragmented firmware have a main header followed by multiple
	 * fragments.  Each fragment is identical to non fragmented firmware
	 * with a firmware header followed by a contiguous chunk of data.  In
	 * the main header, the length field is unused and set to 0xffffffff.
	 * In each fragment header the length is the entire size of that
	 * fragment i.e. fragment data + header length.  Data length is
	 * therefore length field in the header minus TG3_FW_HDR_LEN.
	 */
	if (tp->fw_len == 0xffffffff)
		fw_len = be32_to_cpu(fw_hdr->len);
	else
		fw_len = tp->fw->size;

	return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
}
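
/* Layout sketch of the two firmware formats described above:
 *
 *   non-fragmented: [hdr: version, base_addr, len = bss][data ...]
 *   fragmented:     [main hdr: len = 0xffffffff]
 *                   [frag hdr: len = hdr + data][data ...]
 *                   [frag hdr: len = hdr + data][data ...] ...
 */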
/* tp->lock is held. */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 const struct tg3_firmware_hdr *fw_hdr)
{
	int err, i;
	void (*write_op)(struct tg3 *, u32, u32);
	int total_len = tp->fw->size;

	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
		/* It is possible that bootcode is still loading at this point.
		 * Get the nvram lock first before halting the cpu.
		 */
		int lock_err = tg3_nvram_lock(tp);
		err = tg3_halt_cpu(tp, cpu_base);
		if (!lock_err)
			tg3_nvram_unlock(tp);
		if (err)
			goto out;

		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
			write_op(tp, cpu_scratch_base + i, 0);
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,
		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
	} else {
		/* Subtract additional main header for fragmented firmware and
		 * advance to the first fragment
		 */
		total_len -= TG3_FW_HDR_LEN;
		fw_hdr++;
	}

	do {
		u32 *fw_data = (u32 *)(fw_hdr + 1);
		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
			write_op(tp, cpu_scratch_base +
				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
				     (i * sizeof(u32)),
				 be32_to_cpu(fw_data[i]));

		total_len -= be32_to_cpu(fw_hdr->len);

		/* Advance to next fragment */
		fw_hdr = (struct tg3_firmware_hdr *)
			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
	} while (total_len > 0);

	err = 0;

out:
	return err;
}
/* tp->lock is held. */
static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
{
	int i;
	const int iters = 5;

	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, pc);

	for (i = 0; i < iters; i++) {
		if (tr32(cpu_base + CPU_PC) == pc)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, pc);
		udelay(1000);
	}

	return (i == iters) ? -EBUSY : 0;
}
/* tp->lock is held. */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	int err;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length.  We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC),
			   be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	tg3_rxcpu_resume(tp);

	return 0;
}
static int tg3_validate_rxcpu_state(struct tg3 *tp)
{
	const int iters = 1000;
	int i;
	u32 val;

	/* Wait for boot code to complete initialization and enter service
	 * loop. It is then safe to download service patches
	 */
	for (i = 0; i < iters; i++) {
		if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
			break;

		udelay(10);
	}

	if (i == iters) {
		netdev_err(tp->dev, "Boot code not ready for service patches\n");
		return -EBUSY;
	}

	val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
	if (val & 0xff) {
		netdev_warn(tp->dev,
			    "Other patches exist. Not downloading EEE patch\n");
		return -EEXIST;
	}

	return 0;
}
/* tp->lock is held. */
static void tg3_load_57766_firmware(struct tg3 *tp)
{
	struct tg3_firmware_hdr *fw_hdr;

	if (!tg3_flag(tp, NO_NVRAM))
		return;

	if (tg3_validate_rxcpu_state(tp))
		return;

	if (!tp->fw)
		return;

	/* This firmware blob has a different format than older firmware
	 * releases as given below. The main difference is we have fragmented
	 * data to be written to non-contiguous locations.
	 *
	 * In the beginning we have a firmware header identical to other
	 * firmware which consists of version, base addr and length. The length
	 * here is unused and set to 0xffffffff.
	 *
	 * This is followed by a series of firmware fragments which are
	 * individually identical to previous firmware. i.e. they have the
	 * firmware header and followed by data for that fragment. The version
	 * field of the individual fragment header is unused.
	 */

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
		return;

	if (tg3_rxcpu_pause(tp))
		return;

	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);

	tg3_rxcpu_resume(tp);
}
/* tp->lock is held. */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err;

	if (!tg3_flag(tp, FW_TSO))
		return 0;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length.  We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	cpu_scratch_size = tp->fw_len;

	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup the cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC),
			   be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	tg3_resume_cpu(tp, cpu_base);
	return 0;
}
/* tp->lock is held. */
static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
{
	u32 addr_high, addr_low;
	int i;

	addr_high = ((tp->dev->dev_addr[0] << 8) |
		     tp->dev->dev_addr[1]);
	addr_low = ((tp->dev->dev_addr[2] << 24) |
		    (tp->dev->dev_addr[3] << 16) |
		    (tp->dev->dev_addr[4] <<  8) |
		    (tp->dev->dev_addr[5] <<  0));
	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704) {
		for (i = 0; i < 12; i++) {
			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
		}
	}

	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}
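
/* Worked example of the packing above: for the address 00:10:18:aa:bb:cc,
 * addr_high = 0x0010, addr_low = 0x18aabbcc, and the backoff seed is the
 * byte sum 0x259 masked by TX_BACKOFF_SEED_MASK.
 */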
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
static int tg3_power_up(struct tg3 *tp)
{
	int err;

	tg3_enable_register_access(tp);

	err = pci_set_power_state(tp->pdev, PCI_D0);
	if (!err) {
		/* Switch out of Vaux if it is a NIC */
		tg3_pwrsrc_switch_to_vmain(tp);
	} else {
		netdev_err(tp->dev, "Transition to D0 failed\n");
	}

	return err;
}
static int tg3_setup_phy(struct tg3 *, bool);

static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tg3_flag(tp, CLKREQ_BUG))
		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
					 PCI_EXP_LNKCTL_CLKREQ_EN);

	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     tg3_flag(tp, WOL_ENABLE);

	if (tg3_flag(tp, USE_PHYLIB)) {
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[tp->phy_addr];

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			tp->link_config.speed = phydev->speed;
			tp->link_config.duplex = phydev->duplex;
			tp->link_config.autoneg = phydev->autoneg;
			tp->link_config.advertising = phydev->advertising;

			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
				if (tg3_flag(tp, WOL_SPEED_100MB))
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			tg3_setup_phy(tp, false);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		int i;
		u32 val;

		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else if (tp->phy_flags &
				 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
				if (tp->link_config.active_speed == SPEED_1000)
					mac_mode = MAC_MODE_PORT_MODE_GMII;
				else
					mac_mode = MAC_MODE_PORT_MODE_MII;
			} else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (tg3_asic_rev(tp) == ASIC_REV_5700) {
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
		    tg3_asic_rev(tp) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp, true);

	/* Workaround for unstable PLL clock */
	if ((!tg3_flag(tp, IS_SSB_CORE)) &&
	    ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
	     (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
static void tg3_power_down(struct tg3 *tp)
{
	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
{
	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
	case MII_TG3_AUX_STAT_10HALF:
		*speed = SPEED_10;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_10FULL:
		*speed = SPEED_10;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_100HALF:
		*speed = SPEED_100;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_100FULL:
		*speed = SPEED_100;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_1000HALF:
		*speed = SPEED_1000;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_1000FULL:
		*speed = SPEED_1000;
		*duplex = DUPLEX_FULL;
		break;

	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
				 SPEED_10;
			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
				  DUPLEX_HALF;
			break;
		}
		*speed = SPEED_UNKNOWN;
		*duplex = DUPLEX_UNKNOWN;
		break;
	}
}
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	new_adv = ADVERTISE_CSMA;
	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
	new_adv |= mii_advertise_flowctrl(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);

		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
		if (err)
			goto done;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
	if (!err) {
		int err2;

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;

		if (!tp->eee.eee_enabled) {
			val = 0;
			tp->eee.advertised = 0;
		} else {
			tp->eee.advertised = advertise &
					     (ADVERTISED_100baseT_Full |
					      ADVERTISED_1000baseT_Full);
		}

		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
		if (err)
			val = 0;

		switch (tg3_asic_rev(tp)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
		case ASIC_REV_57766:
		case ASIC_REV_5719:
			/* If we advertised any eee advertisements above... */
			if (val)
				val = MII_TG3_DSP_TAP26_ALNOKO |
				      MII_TG3_DSP_TAP26_RMRXSTO |
				      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
			/* Fall through */
		case ASIC_REV_5720:
		case ASIC_REV_5762:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
		}

		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
		if (!err)
			err = err2;
	}

done:
	return err;
}
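
/* The EEE block above maps ethtool advertisement bits onto the clause-45
 * AN EEE advertisement register (MDIO_MMD_AN / MDIO_AN_EEE_ADV):
 * ADVERTISED_100baseT_Full sets MDIO_AN_EEE_ADV_100TX and
 * ADVERTISED_1000baseT_Full sets MDIO_AN_EEE_ADV_1000T, so advertising
 * both speeds writes both bits in a single cl45 access.
 */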
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		u32 adv, fc;

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			adv = ADVERTISED_10baseT_Half |
			      ADVERTISED_10baseT_Full;
			if (tg3_flag(tp, WOL_SPEED_100MB))
				adv |= ADVERTISED_100baseT_Half |
				       ADVERTISED_100baseT_Full;
			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
				if (!(tp->phy_flags &
				      TG3_PHYFLG_DISABLE_1G_HD_ADV))
					adv |= ADVERTISED_1000baseT_Half;
				adv |= ADVERTISED_1000baseT_Full;
			}

			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
		} else {
			adv = tp->link_config.advertising;
			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
				adv &= ~(ADVERTISED_1000baseT_Half |
					 ADVERTISED_1000baseT_Full);

			fc = tp->link_config.flowctrl;
		}

		tg3_phy_autoneg_cfg(tp, adv, fc);

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			/* Normally during power down we want to autonegotiate
			 * the lowest possible speed for WOL. However, to avoid
			 * link flap, we leave it untouched.
			 */
			return;
		}

		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		int i;
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* With autoneg disabled, 5715 only links up when the
			 * advertisement register has the configured speed
			 * enabled.
			 */
			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
		}

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	}
}
static int tg3_phy_pull_config(struct tg3 *tp)
{
	int err;
	u32 val;

	err = tg3_readphy(tp, MII_BMCR, &val);
	if (err)
		goto done;

	if (!(val & BMCR_ANENABLE)) {
		tp->link_config.autoneg = AUTONEG_DISABLE;
		tp->link_config.advertising = 0;
		tg3_flag_clear(tp, PAUSE_AUTONEG);

		err = -EIO;

		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
		case 0:
			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
				goto done;

			tp->link_config.speed = SPEED_10;
			break;
		case BMCR_SPEED100:
			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
				goto done;

			tp->link_config.speed = SPEED_100;
			break;
		case BMCR_SPEED1000:
			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
				tp->link_config.speed = SPEED_1000;
				break;
			}
			/* Fall through */
		default:
			goto done;
		}

		if (val & BMCR_FULLDPLX)
			tp->link_config.duplex = DUPLEX_FULL;
		else
			tp->link_config.duplex = DUPLEX_HALF;

		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

		err = 0;
		goto done;
	}

	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.advertising = ADVERTISED_Autoneg;
	tg3_flag_set(tp, PAUSE_AUTONEG);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		u32 adv;

		err = tg3_readphy(tp, MII_ADVERTISE, &val);
		if (err)
			goto done;

		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
		tp->link_config.advertising |= adv | ADVERTISED_TP;

		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
	} else {
		tp->link_config.advertising |= ADVERTISED_FIBRE;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 adv;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			err = tg3_readphy(tp, MII_CTRL1000, &val);
			if (err)
				goto done;

			adv = mii_ctrl1000_to_ethtool_adv_t(val);
		} else {
			err = tg3_readphy(tp, MII_ADVERTISE, &val);
			if (err)
				goto done;

			adv = tg3_decode_flowctrl_1000X(val);
			tp->link_config.flowctrl = adv;

			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
			adv = mii_adv_to_ethtool_adv_x(val);
		}

		tp->link_config.advertising |= adv;
	}

done:
	return err;
}
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);

	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);

	udelay(40);

	return err;
}
static bool tg3_phy_eee_config_ok(struct tg3 *tp)
{
	struct ethtool_eee eee;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return true;

	tg3_eee_pull_config(tp, &eee);

	if (tp->eee.eee_enabled) {
		if (tp->eee.advertised != eee.advertised ||
		    tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
		    tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
			return false;
	} else {
		/* EEE is disabled but we're advertising */
		if (eee.advertised)
			return false;
	}

	return true;
}
static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
{
	u32 advmsk, tgtadv, advertising;

	advertising = tp->link_config.advertising;
	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;

	advmsk = ADVERTISE_ALL;
	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	}

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return false;

	if ((*lcladv & advmsk) != tgtadv)
		return false;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 tg3_ctrl;

		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);

		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
			return false;

		if (tgtadv &&
		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
		} else {
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
		}

		if (tg3_ctrl != tgtadv)
			return false;
	}

	return true;
}
static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
{
	u32 lpeth = 0;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 val;

		if (tg3_readphy(tp, MII_STAT1000, &val))
			return false;

		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
	}

	if (tg3_readphy(tp, MII_LPA, rmtadv))
		return false;

	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
	tp->link_config.rmt_adv = lpeth;

	return true;
}
4695 static bool tg3_test_and_report_link_chg(struct tg3
*tp
, bool curr_link_up
)
4697 if (curr_link_up
!= tp
->link_up
) {
4699 netif_carrier_on(tp
->dev
);
4701 netif_carrier_off(tp
->dev
);
4702 if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)
4703 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
4706 tg3_link_report(tp
);
4713 static void tg3_clear_mac_status(struct tg3
*tp
)
4718 MAC_STATUS_SYNC_CHANGED
|
4719 MAC_STATUS_CFG_CHANGED
|
4720 MAC_STATUS_MI_COMPLETION
|
4721 MAC_STATUS_LNKSTATE_CHANGED
);
4725 static void tg3_setup_eee(struct tg3
*tp
)
4729 val
= TG3_CPMU_EEE_LNKIDL_PCIE_NL0
|
4730 TG3_CPMU_EEE_LNKIDL_UART_IDL
;
4731 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_57765_A0
)
4732 val
|= TG3_CPMU_EEE_LNKIDL_APE_TX_MT
;
4734 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL
, val
);
4736 tw32_f(TG3_CPMU_EEE_CTRL
,
4737 TG3_CPMU_EEE_CTRL_EXIT_20_1_US
);
4739 val
= TG3_CPMU_EEEMD_ERLY_L1_XIT_DET
|
4740 (tp
->eee
.tx_lpi_enabled
? TG3_CPMU_EEEMD_LPI_IN_TX
: 0) |
4741 TG3_CPMU_EEEMD_LPI_IN_RX
|
4742 TG3_CPMU_EEEMD_EEE_ENABLE
;
4744 if (tg3_asic_rev(tp
) != ASIC_REV_5717
)
4745 val
|= TG3_CPMU_EEEMD_SND_IDX_DET_EN
;
4747 if (tg3_flag(tp
, ENABLE_APE
))
4748 val
|= TG3_CPMU_EEEMD_APE_TX_DET_EN
;
4750 tw32_f(TG3_CPMU_EEE_MODE
, tp
->eee
.eee_enabled
? val
: 0);
4752 tw32_f(TG3_CPMU_EEE_DBTMR1
,
4753 TG3_CPMU_DBTMR1_PCIEXIT_2047US
|
4754 (tp
->eee
.tx_lpi_timer
& 0xffff));
4756 tw32_f(TG3_CPMU_EEE_DBTMR2
,
4757 TG3_CPMU_DBTMR2_APE_TX_2047US
|
4758 TG3_CPMU_DBTMR2_TXIDXEQ_2047US
);
static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
{
	bool current_link_up;
	u32 bmsr, val;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tg3_clear_mac_status(tp);

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
	     tg3_asic_rev(tp) == ASIC_REV_5704 ||
	     tg3_asic_rev(tp) == ASIC_REV_5705) &&
	    tp->link_up) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = true;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !tg3_flag(tp, INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
			    TG3_PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &val);
	tg3_readphy(tp, MII_TG3_ISTAT, &val);

	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = false;
	current_speed = SPEED_UNKNOWN;
	current_duplex = DUPLEX_UNKNOWN;
	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
	tp->link_config.rmt_adv = 0;

	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					  &val);
		if (!err && !(val & (1 << 10))) {
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					     val | (1 << 10));
			goto relink;
		}
	}

	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			bool eee_config_ok = tg3_phy_eee_config_ok(tp);

			if ((bmcr & BMCR_ANENABLE) &&
			    eee_config_ok &&
			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
				current_link_up = true;

			/* EEE settings changes take effect only after a phy
			 * reset.  If we have skipped a reset due to Link Flap
			 * Avoidance being enabled, do it now.
			 */
			if (!eee_config_ok &&
			    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
			    !force_reset) {
				tg3_setup_eee(tp);
				tg3_phy_reset(tp);
			}
		} else {
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = true;
			}
		}

		if (current_link_up &&
		    tp->link_config.active_duplex == DUPLEX_FULL) {
			u32 reg, bit;

			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
				reg = MII_TG3_FET_GEN_STAT;
				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
			} else {
				reg = MII_TG3_EXT_STAT;
				bit = MII_TG3_EXT_STAT_MDIX;
			}

			if (!tg3_readphy(tp, reg, &val) && (val & bit))
				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;

			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
		}
	}

relink:
	if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		tg3_phy_copper_begin(tp);

		if (tg3_flag(tp, ROBOSWITCH)) {
			current_link_up = true;
			/* FIXME: when BCM5325 switch is used use 100 MBit/s */
			current_speed = SPEED_1000;
			current_duplex = DUPLEX_FULL;
			tp->link_config.active_speed = current_speed;
			tp->link_config.active_duplex = current_duplex;
		}

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			current_link_up = true;
	}

	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	/* In order for the 5750 core in BCM4785 chip to work properly
	 * in RGMII mode, the Led Control Register must be set up.
	 */
	if (tg3_flag(tp, RGMII_MODE)) {
		u32 led_ctrl = tr32(MAC_LED_CTRL);
		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);

		if (tp->link_config.active_speed == SPEED_10)
			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
		else if (tp->link_config.active_speed == SPEED_100)
			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
				     LED_CTRL_100MBPS_ON);
		else if (tp->link_config.active_speed == SPEED_1000)
			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
				     LED_CTRL_1000MBPS_ON);

		tw32(MAC_LED_CTRL, led_ctrl);
		udelay(40);
	}

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		if (current_link_up &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_phy_eee_adjust(tp, current_link_up);

	if (tg3_flag(tp, USE_LINKCHG_REG)) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
	    current_link_up &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		       (MAC_STATUS_SYNC_CHANGED |
			MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_CLKREQ_EN);
		else
			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
						 PCI_EXP_LNKCTL_CLKREQ_EN);
	}

	tg3_test_and_report_link_chg(tp, current_link_up);

	return 0;
}
struct tg3_fiber_aneginfo {
	int state;
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	unsigned long link_time, cur_time;

	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06

};
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

#define ANEG_STATE_SETTLE_TIME	10000
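
/* In a successful autoneg session the state machine below advances
 * AN_ENABLE -> RESTART_INIT -> RESTART -> ABILITY_DETECT_INIT ->
 * ABILITY_DETECT -> ACK_DETECT_INIT -> ACK_DETECT ->
 * COMPLETE_ACK_INIT -> COMPLETE_ACK -> IDLE_DETECT_INIT ->
 * IDLE_DETECT -> LINK_OK, driven by repeated calls from fiber_autoneg().
 */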
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
{
	int res = 0;
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;
	unsigned int tick;
	u32 tmp;

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
	udelay(40);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
	udelay(40);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	tick = 0;
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)
			break;

		udelay(1);
	}

	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
		res = 1;

	return res;
}
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	bool current_link_up;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = false;

	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = true;
		}
		goto out;
	}

	/* Want auto-negotiation. */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = true;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = true;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = true;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	bool current_link_up = false;

	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = true;
		}
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		mac_status = tr32(MAC_STATUS);
		if (!current_link_up &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = true;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = true;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	bool current_link_up;
	int i;

	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	if (!tg3_flag(tp, HW_AUTONEG) &&
	    tp->link_up &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling. */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = false;
	tp->link_config.rmt_adv = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = false;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_UNKNOWN;
		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
{
	int err = 0;
	u32 bmsr, bmcr;
	u16 current_speed = SPEED_UNKNOWN;
	u8 current_duplex = DUPLEX_UNKNOWN;
	bool current_link_up = false;
	u32 local_adv, remote_adv, sgsr;

	if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720) &&
	    !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
	    (sgsr & SERDES_TG3_SGMII_MODE)) {

		if (force_reset)
			tg3_phy_reset(tp);

		tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;

		if (!(sgsr & SERDES_TG3_LINK_UP)) {
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
		} else {
			current_link_up = true;
			if (sgsr & SERDES_TG3_SPEED_1000) {
				current_speed = SPEED_1000;
				tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
			} else if (sgsr & SERDES_TG3_SPEED_100) {
				current_speed = SPEED_100;
				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
			} else {
				current_speed = SPEED_10;
				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
			}

			if (sgsr & SERDES_TG3_FULL_DUPLEX)
				current_duplex = DUPLEX_FULL;
			else
				current_duplex = DUPLEX_HALF;
		}

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		tg3_clear_mac_status(tp);

		goto fiber_setup_done;
	}

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_clear_mac_status(tp);

	if (force_reset)
		tg3_phy_reset(tp);

	tp->link_config.rmt_adv = 0;

	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (tg3_asic_rev(tp) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, newadv;

		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				 ADVERTISE_1000XPAUSE |
				 ADVERTISE_1000XPSE_ASYM |
				 ADVERTISE_SLCT);

		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);

		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, newadv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (tp->link_up) {
				u32 adv;

				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				tg3_carrier_off(tp);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (tg3_asic_rev(tp) == ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = true;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = false;
			}
		}
	}

fiber_setup_done:
	if (current_link_up && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	tg3_test_and_report_link_chg(tp, current_link_up);
	return err;
}
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!tp->link_up &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				     MII_TG3_DSP_EXP1_INT_STAT);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (tp->link_up &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}
}
static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		u32 scale;

		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		if (tp->link_up) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!tp->link_up)
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
/* tp->lock must be held */
static u64 tg3_refclk_read(struct tg3 *tp)
{
	u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
	return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
}
/* tp->lock must be held */
static void tg3_refclk_write(struct tg3 *tp, u64 newval)
{
	u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);

	tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
	tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
}
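
/* The STOP/RESUME bracket in tg3_refclk_write() keeps the reference clock
 * halted while the 64-bit value is written as two 32-bit halves, so the
 * counter never runs with a half-updated value.
 */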
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
static inline void tg3_full_unlock(struct tg3 *tp);
static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
{
	struct tg3 *tp = netdev_priv(dev);

	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE;

	if (tg3_flag(tp, PTP_CAPABLE)) {
		info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
					SOF_TIMESTAMPING_RX_HARDWARE |
					SOF_TIMESTAMPING_RAW_HARDWARE;
	}

	if (tp->ptp_clock)
		info->phc_index = ptp_clock_index(tp->ptp_clock);
	else
		info->phc_index = -1;

	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
	return 0;
}
static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
	bool neg_adj = false;
	u32 correction = 0;

	if (ppb < 0) {
		neg_adj = true;
		ppb = -ppb;
	}

	/* Frequency adjustment is performed using hardware with a 24 bit
	 * accumulator and a programmable correction value. On each clk, the
	 * correction value gets added to the accumulator and when it
	 * overflows, the time counter is incremented/decremented.
	 *
	 * So conversion from ppb to correction value is
	 *		ppb * (1 << 24) / 1000000000
	 */
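	/* Worked example (illustrative): ppb = 1000 (+1 ppm) gives
	 * 1000 * 16777216 / 1000000000 = 16 after truncation, so the
	 * accumulator overflows once every 2^24 / 16 = 1048576 clocks,
	 * i.e. the counter is nudged at roughly the requested 1 ppm rate.
	 */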
	correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
		     TG3_EAV_REF_CLK_CORRECT_MASK;

	tg3_full_lock(tp, 0);

	if (correction)
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
		     TG3_EAV_REF_CLK_CORRECT_EN |
		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
	else
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);

	tg3_full_unlock(tp);

	return 0;
}
static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	tp->ptp_adjust += delta;
	tg3_full_unlock(tp);

	return 0;
}

static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
{
	u64 ns;
	u32 remainder;
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	ns = tg3_refclk_read(tp);
	ns += tp->ptp_adjust;
	tg3_full_unlock(tp);

	ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
	ts->tv_nsec = remainder;

	return 0;
}
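
/* Design note: tg3_ptp_adjtime() accumulates deltas in tp->ptp_adjust rather
 * than touching the hardware counter; tg3_ptp_gettime() folds that software
 * offset into every reading, and tg3_ptp_settime() below clears it whenever
 * the hardware clock is rewritten.
 */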
static int tg3_ptp_settime(struct ptp_clock_info *ptp,
			   const struct timespec *ts)
{
	u64 ns;
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	ns = timespec_to_ns(ts);

	tg3_full_lock(tp, 0);
	tg3_refclk_write(tp, ns);
	tp->ptp_adjust = 0;
	tg3_full_unlock(tp);

	return 0;
}
static int tg3_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
	u32 clock_ctl;
	int rval = 0;

	switch (rq->type) {
	case PTP_CLK_REQ_PEROUT:
		if (rq->perout.index != 0)
			return -EINVAL;

		tg3_full_lock(tp, 0);
		clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
		clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;

		if (on) {
			u64 nsec;

			nsec = rq->perout.start.sec * 1000000000ULL +
			       rq->perout.start.nsec;

			if (rq->perout.period.sec || rq->perout.period.nsec) {
				netdev_warn(tp->dev,
					    "Device supports only a one-shot timesync output, period must be 0\n");
				rval = -EINVAL;
				goto err_out;
			}

			if (nsec & (1ULL << 63)) {
				netdev_warn(tp->dev,
					    "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
				rval = -EINVAL;
				goto err_out;
			}

			tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
			tw32(TG3_EAV_WATCHDOG0_MSB,
			     TG3_EAV_WATCHDOG0_EN |
			     ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));

			tw32(TG3_EAV_REF_CLCK_CTL,
			     clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
		} else {
			tw32(TG3_EAV_WATCHDOG0_MSB, 0);
			tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
		}

err_out:
		tg3_full_unlock(tp);
		return rval;

	default:
		break;
	}

	return -EOPNOTSUPP;
}
static const struct ptp_clock_info tg3_ptp_caps = {
	.owner		= THIS_MODULE,
	.name		= "tg3 clock",
	.max_adj	= 250000000,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 1,
	.pps		= 0,
	.adjfreq	= tg3_ptp_adjfreq,
	.adjtime	= tg3_ptp_adjtime,
	.gettime	= tg3_ptp_gettime,
	.settime	= tg3_ptp_settime,
	.enable		= tg3_ptp_enable,
};
static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
				     struct skb_shared_hwtstamps *timestamp)
{
	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
	timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
					  tp->ptp_adjust);
}
/* tp->lock must be held */
static void tg3_ptp_init(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE))
		return;

	/* Initialize the hardware clock to the system time. */
	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
	tp->ptp_adjust = 0;
	tp->ptp_info = tg3_ptp_caps;
}

/* tp->lock must be held */
static void tg3_ptp_resume(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE))
		return;

	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
	tp->ptp_adjust = 0;
}

static void tg3_ptp_fini(struct tg3 *tp)
{
	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
		return;

	ptp_clock_unregister(tp->ptp_clock);
	tp->ptp_clock = NULL;
	tp->ptp_adjust = 0;
}
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}

static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
{
	int i;

	dst = (u32 *)((u8 *)dst + off);
	for (i = 0; i < len; i += sizeof(u32))
		*dst++ = tr32(off + i);
}
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs)
		return;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	tg3_flag_set(tp, TX_RECOVERY_PENDING);
}
static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();
	return tnapi->tx_pending -
	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
}
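
/* Worked example (illustrative, assuming TG3_TX_RING_SIZE == 512 from
 * tg3.h): with tx_pending = 511, tx_prod = 5 and tx_cons = 510,
 * (5 - 510) & 511 = 7 descriptors are in flight, so tg3_tx_avail()
 * returns 511 - 7 = 504; the mask makes the computation wrap-safe
 * without a branch.
 */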
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
			struct skb_shared_hwtstamps timestamp;
			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;

			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);

			skb_tstamp_tx(skb, &timestamp);
		}

		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
static void tg3_frag_free(bool is_frag, void *data)
{
	if (is_frag)
		put_page(virt_to_head_page(data));
	else
		kfree(data);
}
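
/* Note: tg3_rx_data_free() below recomputes the same SKB_DATA_ALIGN()-based
 * size as tg3_alloc_rx_data(), so the frag-vs-kmalloc decision made at
 * allocation time (skb_size <= PAGE_SIZE) is reproduced exactly on free.
 */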
static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
{
	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	if (!ri->data)
		return;

	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
			 map_sz, PCI_DMA_FROMDEVICE);
	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
	ri->data = NULL;
}
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			     u32 opaque_key, u32 dest_idx_unmasked,
			     unsigned int *frag_size)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	if (skb_size <= PAGE_SIZE) {
		data = netdev_alloc_frag(skb_size);
		*frag_size = skb_size;
	} else {
		data = kmalloc(skb_size, GFP_ATOMIC);
		*frag_size = 0;
	}
	if (!data)
		return -ENOMEM;

	mapping = pci_map_single(tp->pdev,
				 data + TG3_RX_OFFSET(tp),
				 data_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
		tg3_frag_free(skb_size <= PAGE_SIZE, data);
		return -EIO;
	}

	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return data_size;
}
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_data for full details.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->data = NULL;
}
6761 /* The RX ring scheme is composed of multiple rings which post fresh
6762 * buffers to the chip, and one special ring the chip uses to report
6763 * status back to the host.
6765 * The special ring reports the status of received packets to the
6766 * host. The chip does not write into the original descriptor the
6767 * RX buffer was obtained from. The chip simply takes the original
6768 * descriptor as provided by the host, updates the status and length
6769 * field, then writes this into the next status ring entry.
6771 * Each ring the host uses to post buffers to the chip is described
6772 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
6773 * it is first placed into the on-chip ram. When the packet's length
6774 * is known, it walks down the TG3_BDINFO entries to select the ring.
6775 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6776 * which is within the range of the new packet's length is chosen.
6778 * The "separate ring for rx status" scheme may sound queer, but it makes
6779 * sense from a cache coherency perspective. If only the host writes
6780 * to the buffer post rings, and only the chip writes to the rx status
/* ...as long as the host writes only into the producer rings and the chip
 * writes only into the return rings, then cache lines never move beyond the
 * shared-modified state.  If both the host and chip were to write into the
 * same ring, cache line eviction could occur since both entities want it
 * in an exclusive state.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;
		u8 *data;
		u64 tstamp = 0;

		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		prefetch(data + TG3_RX_OFFSET(tp));
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV1 ||
		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV2) {
			tstamp = tr32(TG3_RX_TSTAMP_LSB);
			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
		}

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;
			unsigned int frag_size;

			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
						     *post_ptr, &frag_size);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			/* Ensure that the update to the data happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->data = NULL;

			skb = build_skb(data, frag_size);
			if (!skb) {
				tg3_frag_free(frag_size != 0, data);
				goto drop_it_no_recycle;
			}
			skb_reserve(skb, TG3_RX_OFFSET(tp));
		} else {
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			skb = netdev_alloc_skb(tp->dev,
					       len + TG3_RAW_IP_ALIGN);
			if (skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(skb->data,
			       data + TG3_RX_OFFSET(tp),
			       len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
		}

		skb_put(skb, len);
		if (tstamp)
			tg3_hwclock_to_timestamp(tp, tstamp,
						 skb_hwtstamps(skb));

		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		/* Sync BD data before updating mailbox */
		wmb();

		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		if (tnapi != &tp->napi[1]) {
			tp->rx_refill = true;
			napi_schedule(&tp->napi[1].napi);
		}
	}

	return received;
}
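/* NOTE (illustrative sketch, not driver code): tg3_rx() above implements a
 * copy-break receive path.  Frames longer than TG3_RX_COPY_THRESH(tp) hand
 * the original DMA buffer to the stack via build_skb() and post a fresh
 * buffer; short frames are copied into a small skb so the large buffer can
 * be recycled in place.  Roughly:
 *
 *	if (len > copy_thresh)				// zero-copy path
 *		skb = build_skb(data, frag_size);
 *	else						// copy path
 *		memcpy(skb->data, data + offset, len);	// 'data' is recycled
 *
 * The threshold trades the cost of a memcpy for small frames against
 * handing large, mostly-empty buffers up the stack.
 */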
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				tw32_f(MAC_STATUS,
				       (MAC_STATUS_SYNC_CHANGED |
					MAC_STATUS_CFG_CHANGED |
					MAC_STATUS_MI_COMPLETION |
					MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, false);
			spin_unlock(&tp->lock);
		}
	}
}
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	if (!tnapi->rx_rcb_prod_idx)
		return work_done;

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		tp->rx_refill = false;
		for (i = 1; i <= tp->rxq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
static inline void tg3_reset_task_schedule(struct tg3 *tp)
{
	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
		schedule_work(&tp->reset_task);
}

static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}
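/* NOTE: tg3_reset_task_schedule() relies on the atomicity of
 * test_and_set_bit(), so concurrent callers collapse into a single queued
 * work item.  Sketch of the idiom:
 *
 *	if (!test_and_set_bit(FLAG, bits))	// only the first caller
 *		schedule_work(&work);		// queues; the rest no-op
 *
 * The bit is cleared again only by tg3_reset_task_cancel() above or by the
 * reset task itself once it has run.
 */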
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {

			/* This test here is not race free, but will reduce
			 * the number of interrupts by looping again.
			 */
			if (tnapi == &tp->napi[1] && tp->rx_refill)
				continue;

			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

			/* This test here is synchronized by napi_schedule()
			 * and napi_complete() to close the race condition.
			 */
			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
				tw32(HOSTCC_MODE, tp->coalesce_mode |
						  HOSTCC_MODE_ENABLE |
						  tnapi->coal_now);
			}
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
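/* NOTE (sketch of the re-check above; names as in tg3_poll_msix()): the
 * doubled rx_refill test is the usual "check, complete, re-check" NAPI
 * pattern.  The first test is an opportunistic fast path; the second,
 * issued after the interrupt mailbox write, closes the window in which
 * tg3_rx() sets tp->rx_refill between the two:
 *
 *	napi_complete(napi);
 *	tw32_mailbox(int_mbox, last_tag << 24);	// re-enable IRQ
 *	if (tp->rx_refill)			// lost the race?
 *		tw32(HOSTCC_MODE, ...);		// force a new interrupt
 */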
static void tg3_process_error(struct tg3 *tp)
{
	u32 val;
	bool real_error = false;

	if (tg3_flag(tp, ERROR_PROCESSED))
		return;

	/* Check Flow Attention register */
	val = tr32(HOSTCC_FLOW_ATTN);
	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (!real_error)
		return;

	tg3_flag_set(tp, ERROR_PROCESSED);
	tg3_reset_task_schedule(tp);
}
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
static void tg3_napi_disable(struct tg3 *tp)
{
	int i;

	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
}

static void tg3_napi_enable(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
}

static void tg3_napi_init(struct tg3 *tp)
{
	int i;

	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
	for (i = 1; i < tp->irq_cnt; i++)
		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
}

static void tg3_napi_fini(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		netif_napi_del(&tp->napi[i].napi);
}
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_carrier_off(tp->dev);
	netif_tx_disable(tp->dev);
}

/* tp->lock must be held */
static inline void tg3_netif_start(struct tg3 *tp)
{
	tg3_ptp_resume(tp);

	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	if (tp->link_up)
		netif_carrier_on(tp->dev);

	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}

/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, the IRQ handlers must be quiesced as well.
 * Most of the time this is not necessary, except when shutting down
 * the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}

static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
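/* NOTE (usage sketch, not driver code): tg3_full_lock()/tg3_full_unlock()
 * bracket any reconfiguration that must exclude the softirq paths and,
 * when irq_sync is non-zero, the hard IRQ handlers too:
 *
 *	tg3_full_lock(tp, 1);		// also quiesces the IRQ handlers
 *	... reprogram the chip ...
 *	tg3_full_unlock(tp);
 *
 * The tg3_irq_sync() tests in the ISRs below are the reader side of the
 * tp->irq_sync flag set by tg3_irq_quiesce() above.
 */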
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}

/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
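/* NOTE: the 0x00000001 mailbox write in tg3_interrupt() above both clears
 * INTA# and tells the chip to hold further interrupts until the mailbox is
 * written with zero again -- either by tg3_int_reenable() once NAPI polling
 * finishes, or immediately above when the interrupt turns out to be a
 * shared one with no work pending.  tw32_mailbox_f() is the flushing
 * variant: it reads the register back so the de-assertion is not left
 * sitting in a posted-write buffer.
 */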
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_irq_sync(tp))
		return;

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	tg3_reset_task_schedule(tp);
}
/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
	u32 base = (u32) mapping & 0xffffffff;

	return (base > 0xffffdcc0) && (base + len + 8 < base);
}
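/* NOTE (worked example): tg3_4g_overflow_test() exploits u32 wraparound.
 * With base = 0xffffffe0 (32 bytes below a 4GB boundary) and len = 100:
 *
 *	base + len + 8 = 0x4c  (wrapped around zero), which is < base
 *
 * so the buffer is flagged as crossing the boundary.  The base > 0xffffdcc0
 * pre-check cheaply excludes buffers that start more than ~9KB below the
 * boundary and therefore cannot wrap for any legal frame length.
 */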
/* Test for TSO DMA buffers that cross into regions which are within MSS bytes
 * of any 4GB boundaries: 4G, 8G, etc
 */
static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					   u32 len, u32 mss)
{
	if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
		u32 base = (u32) mapping & 0xffffffff;

		return ((base + len + (mss & 0x3fff)) < base);
	}
	return 0;
}

/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	return 0;
#endif
}
static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
				 dma_addr_t mapping, u32 len, u32 flags,
				 u32 mss, u32 vlan)
{
	txbd->addr_hi = ((u64) mapping >> 32);
	txbd->addr_lo = ((u64) mapping & 0xffffffff);
	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
}
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tp->dma_limit) {
		u32 prvidx = *entry;
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}
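/* NOTE (worked example, values assumed): with tp->dma_limit = 4096, a
 * 9000-byte mapping leaves tg3_tx_frag_set() as 4096 + 4096 + 808 bytes in
 * three descriptors.  If a split would leave a tail of 8 bytes or less
 * (the "8byte DMA problem" above), the current chunk is shrunk to
 * dma_limit/2 so the tail grows past 8 bytes: a 4100-byte mapping goes out
 * as 2048 + 2052 instead of 4096 + 4.
 */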
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}
/* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (tg3_asic_rev(tp) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb(new_skb);
				ret = -1;
			}
		}
	}

	dev_kfree_skb(skb);
	*pskb = new_skb;
	return ret;
}
static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);

/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
		netif_stop_queue(tp->dev);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
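/* NOTE: tg3_tso_bug() above sidesteps the hardware TSO erratum by letting
 * the stack segment in software: skb_gso_segment() with NETIF_F_TSO masked
 * out of the feature set returns a chain of already-segmented skbs, each
 * of which is fed back through tg3_start_xmit() as an ordinary packet.
 * The gso_segs * 3 availability estimate is deliberately pessimistic --
 * roughly header plus payload descriptors per resulting segment.
 */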
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	u32 budget;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	budget = tg3_tx_avail(tnapi);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		struct iphdr *iph;
		u32 tcp_opt_len, hdr_len;

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;

		if (!skb_is_gso_v6(skb)) {
			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
		    tg3_flag(tp, TSO_BUG))
			return tg3_tso_bug(tp, skb);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	if (vlan_tx_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = vlan_tx_tag_get(skb);
	}

	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
	    tg3_flag(tp, TX_TSTAMP_EN)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		base_flags |= TXD_FLAG_HWTSTAMP;
	}

	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;


	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		/* Now loop through additional data
		 * fragments, and queue them.
		 */
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);
	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating mailbox */
	wmb();

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	mmiowb();
	return NETDEV_TX_OK;

dma_error:
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}
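/* NOTE (sketch of the stop/wake handshake; mirrors tg3_tx() on the
 * completion side): the smp_mb() in tg3_start_xmit() above is what makes
 * the producer's and consumer's orderings meet:
 *
 *	producer (xmit)			consumer (tg3_tx)
 *	---------------			-----------------
 *	netif_tx_stop_queue(txq);	advance tx_cons;
 *	smp_mb();			smp_mb();
 *	re-check tg3_tx_avail()		if (stopped && avail > thresh)
 *	    and maybe wake;		        netif_tx_wake_queue(txq);
 *
 * Whichever side runs second is guaranteed to observe the other's update,
 * so the queue cannot be left stopped while descriptors are available.
 */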
static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
	if (enable) {
		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
				  MAC_MODE_PORT_MODE_MASK);

		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

		if (!tg3_flag(tp, 5705_PLUS))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else {
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

		if (tg3_flag(tp, 5705_PLUS) ||
		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
		    tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	tw32(MAC_MODE, tp->mac_mode);
	udelay(40);
}
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, false);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}
static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (features & NETIF_F_LOOPBACK) {
		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, true);
		netif_carrier_on(tp->dev);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
	} else {
		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			return;

		spin_lock_bh(&tp->lock);
		tg3_mac_loopback(tp, false);
		/* Force link status check */
		tg3_setup_phy(tp, true);
		spin_unlock_bh(&tp->lock);
		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
	}
}
static netdev_features_t tg3_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	struct tg3 *tp = netdev_priv(dev);

	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
		features &= ~NETIF_F_ALL_TSO;

	return features;
}

static int tg3_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
		tg3_set_loopback(dev, features);

	return 0;
}
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					 tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						 TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				 tp->rx_pkt_map_sz);

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					 TG3_RX_JMB_MAP_SZ);
	}
}
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	kfree(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
				  tpr->rx_std, tpr->rx_std_mapping);
		tpr->rx_std = NULL;
	}
	if (tpr->rx_jmb) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
				  tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
	}
}
static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
{
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
				      GFP_KERNEL);
	if (!tpr->rx_std_buffers)
		return -ENOMEM;

	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
	if (!tpr->rx_std)
		goto err_out;

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
			goto err_out;

		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
						 TG3_RX_JMB_RING_BYTES(tp),
						 &tpr->rx_jmb_mapping,
						 GFP_KERNEL);
		if (!tpr->rx_jmb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_rx_prodring_fini(tp, tpr);
	return -ENOMEM;
}
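/* NOTE (usage sketch): the prodring routines above split the ring lifetime
 * in two: _init/_fini manage the long-lived DMA-coherent descriptor memory
 * and shadow buffer arrays, while _alloc/_free only populate and drain the
 * data buffers, so a chip reset can recycle the rings without reallocating
 * them.  Typical order, as used elsewhere in this file:
 *
 *	tg3_rx_prodring_init(tp, tpr);	// allocate rings (open)
 *	tg3_rx_prodring_alloc(tp, tpr);	// post rx buffers (reset/init)
 *	...
 *	tg3_rx_prodring_free(tp, tpr);	// drop rx buffers (reset/halt)
 *	tg3_rx_prodring_fini(tp, tpr);	// free rings (close)
 */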
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
			struct sk_buff *skb = tnapi->tx_buffers[i].skb;

			if (!skb)
				continue;

			tg3_tx_skb_unmap(tnapi, i,
					 skb_shinfo(skb)->nr_frags - 1);

			dev_kfree_skb_any(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
	}
}
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	int i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
		tnapi->hw_status->status = 0;
		tnapi->hw_status->status_tag = 0;
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		tnapi->tx_prod = 0;
		tnapi->tx_cons = 0;
		if (tnapi->tx_ring)
			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

		tnapi->rx_rcb_ptr = 0;
		if (tnapi->rx_rcb)
			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
			tg3_free_rings(tp);
			return -ENOMEM;
		}
	}

	return 0;
}
static void tg3_mem_tx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->tx_ring) {
			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
					  tnapi->tx_ring, tnapi->tx_desc_mapping);
			tnapi->tx_ring = NULL;
		}

		kfree(tnapi->tx_buffers);
		tnapi->tx_buffers = NULL;
	}
}
static int tg3_mem_tx_acquire(struct tg3 *tp)
{
	int i;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* If multivector TSS is enabled, vector 0 does not handle
	 * tx interrupts.  Don't allocate any resources for it.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
		tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
					    TG3_TX_RING_SIZE, GFP_KERNEL);
		if (!tnapi->tx_buffers)
			goto err_out;

		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
						    TG3_TX_RING_BYTES,
						    &tnapi->tx_desc_mapping,
						    GFP_KERNEL);
		if (!tnapi->tx_ring)
			goto err_out;
	}

	return 0;

err_out:
	tg3_mem_tx_release(tp);
	return -ENOMEM;
}
static void tg3_mem_rx_release(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tg3_rx_prodring_fini(tp, &tnapi->prodring);

		if (!tnapi->rx_rcb)
			continue;

		dma_free_coherent(&tp->pdev->dev,
				  TG3_RX_RCB_RING_BYTES(tp),
				  tnapi->rx_rcb,
				  tnapi->rx_rcb_mapping);
		tnapi->rx_rcb = NULL;
	}
}
static int tg3_mem_rx_acquire(struct tg3 *tp)
{
	unsigned int i, limit;

	limit = tp->rxq_cnt;

	/* If RSS is enabled, we need a (dummy) producer ring
	 * set on vector zero.  This is the true hw prodring.
	 */
	if (tg3_flag(tp, ENABLE_RSS))
		limit++;

	for (i = 0; i < limit; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector RSS is enabled, vector 0
		 * does not handle rx or tx interrupts.
		 * Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
						    TG3_RX_RCB_RING_BYTES(tp),
						    &tnapi->rx_rcb_mapping,
						    GFP_KERNEL);
		if (!tnapi->rx_rcb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_mem_rx_release(tp);
	return -ENOMEM;
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	tg3_mem_rx_release(tp);
	tg3_mem_tx_release(tp);

	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
					   sizeof(struct tg3_hw_stats),
					   &tp->stats_mapping, GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
						       TG3_HW_STATUS_SIZE,
						       &tnapi->status_mapping,
						       GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		sblk = tnapi->hw_status;

		if (tg3_flag(tp, ENABLE_RSS)) {
			u16 *prodptr = NULL;

			/*
			 * When RSS is enabled, the status block format changes
			 * slightly.  The "rx_jumbo_consumer", "reserved",
			 * and "rx_mini_consumer" members get mapped to the
			 * other three rx return ring producer indexes.
			 */
			switch (i) {
			case 1:
				prodptr = &sblk->idx[0].rx_producer;
				break;
			case 2:
				prodptr = &sblk->rx_jumbo_consumer;
				break;
			case 3:
				prodptr = &sblk->reserved;
				break;
			case 4:
				prodptr = &sblk->rx_mini_consumer;
				break;
			}
			tnapi->rx_rcb_prod_idx = prodptr;
		} else {
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
		}
	}

	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
		goto err_out;

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		if (pci_channel_offline(tp->pdev)) {
			dev_err(&tp->pdev->dev,
				"tg3_stop_block device offline, "
				"ofs=%lx enable_bit=%x\n",
				ofs, enable_bit);
			return -ENODEV;
		}

		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
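/* NOTE (generic sketch): tg3_stop_block() above is the bounded
 * poll-for-clear loop used throughout this driver:
 *
 *	tw32_f(ofs, tr32(ofs) & ~enable_bit);	// request stop, flushed
 *	for (i = 0; i < MAX_WAIT_CNT; i++) {	// ~100ms at 100us steps
 *		udelay(100);
 *		if (!(tr32(ofs) & enable_bit))
 *			break;			// block acknowledged stop
 *	}
 *
 * The pci_channel_offline() test inside the loop bails out early when the
 * device has dropped off the bus, where reads return all-ones and the exit
 * condition could never be met.
 */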
/* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, bool silent)
{
	int i, err;

	tg3_disable_ints(tp);

	if (pci_channel_offline(tp->pdev)) {
		tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
		tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
		err = -ENODEV;
		goto err_no_dev;
	}

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

err_no_dev:
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	return err;
}
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}

/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (!tg3_flag(tp, PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
/* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	if (!pci_device_is_present(tp->pdev))
		return -ENODEV;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
		u16 val16;

		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
			int j;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (j = 0; j < 5000; j++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);

		/* Clear error status */
		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
					   PCI_EXP_DEVSTA_CED |
					   PCI_EXP_DEVSTA_NFED |
					   PCI_EXP_DEVSTA_FED |
					   PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/*
		 * BCM4785: In order to avoid repercussions from using
		 * potentially defective internal ROM, stop the Rx RISC CPU,
		 * which is not required.
		 */
		tg3_stop_fw(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
	}

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tw32(GRC_MODE, tp->grc_mode);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5720) {
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
	}

	/* Reprobe ASF enable state.  */
	tg3_flag_clear(tp, ENABLE_ASF);
	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
			   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
			if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
			if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |=
					TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
		}
	}

	return 0;
}
static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);

/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, bool silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	__tg3_set_mac_addr(tp, false);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (tp->hw_stats) {
		/* Save the stats across chip resets... */
		tg3_get_nstats(tp, &tp->net_stats_prev);
		tg3_get_estats(tp, &tp->estats_prev);

		/* And make sure the next sample is new data */
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
	}

	return err;
}
static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err = 0;
	bool skip_mac_1 = false;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	if (!netif_running(dev))
		return 0;

	if (tg3_flag(tp, ENABLE_ASF)) {
		u32 addr0_high, addr0_low, addr1_high, addr1_low;

		addr0_high = tr32(MAC_ADDR_0_HIGH);
		addr0_low = tr32(MAC_ADDR_0_LOW);
		addr1_high = tr32(MAC_ADDR_1_HIGH);
		addr1_low = tr32(MAC_ADDR_1_LOW);

		/* Skip MAC addr 1 if ASF is using it. */
		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
		    !(addr1_high == 0 && addr1_low == 0))
			skip_mac_1 = true;
	}
	spin_lock_bh(&tp->lock);
	__tg3_set_mac_addr(tp, skip_mac_1);
	spin_unlock_bh(&tp->lock);

	return err;
}
/* tp->lock is held. */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		      maxlen_flags);

	if (!tg3_flag(tp, 5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}
static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;

	if (!tg3_flag(tp, ENABLE_TSS)) {
		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_TXCOL_TICKS, 0);
		tw32(HOSTCC_TXMAX_FRAMES, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);

		for (; i < tp->txq_cnt; i++) {
			u32 reg;

			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
			tw32(reg, ec->tx_coalesce_usecs);
			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames);
			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames_irq);
		}
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}
static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i = 0;
	u32 limit = tp->rxq_cnt;

	if (!tg3_flag(tp, ENABLE_RSS)) {
		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_RXCOL_TICKS, 0);
		tw32(HOSTCC_RXMAX_FRAMES, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);

		for (; i < limit; i++) {
			u32 reg;

			reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
			tw32(reg, ec->rx_coalesce_usecs);
			reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
			tw32(reg, ec->rx_max_coalesced_frames);
			reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
			tw32(reg, ec->rx_max_coalesced_frames_irq);
		}
	}

	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
	}
}
static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	tg3_coal_tx_init(tp, ec);
	tg3_coal_rx_init(tp, ec);

	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 val = ec->stats_block_coalesce_usecs;

		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);

		if (!tp->link_up)
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}
}
/* tp->lock is held. */
static void tg3_tx_rcbs_disable(struct tg3 *tp)
{
	u32 txrcb, limit;

	/* Disable all transmit rings but the first. */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5762)
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);
}
/* tp->lock is held. */
static void tg3_tx_rcbs_init(struct tg3 *tp)
{
	int i = 0;
	u32 txrcb = NIC_SRAM_SEND_RCB;

	if (tg3_flag(tp, ENABLE_TSS))
		i++;

	for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (!tnapi->tx_ring)
			continue;

		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
	}
}
/* tp->lock is held. */
static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
{
	u32 rxrcb, limit;

	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
		 tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);
}
/* tp->lock is held. */
static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
{
	int i = 0;
	u32 rxrcb = NIC_SRAM_RCV_RET_RCB;

	if (tg3_flag(tp, ENABLE_RSS))
		i++;

	for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (!tnapi->rx_rcb)
			continue;

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
	}
}
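/* Ordering note: tg3_rings_reset() below first disables every send and
 * receive-return ring control block and quiesces the interrupt/producer
 * mailboxes, then reprograms the status block DMA addresses, and only
 * then re-enables the RCBs via tg3_tx_rcbs_init()/tg3_rx_ret_rcbs_init().
 */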
/* tp->lock is held. */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk;
	struct tg3_napi *tnapi = &tp->napi[0];

	tg3_tx_rcbs_disable(tp);

	tg3_rx_ret_rcbs_disable(tp);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
		stblk += 8;

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	tg3_tx_rcbs_init(tp);
	tg3_rx_ret_rcbs_init(tp);
}
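/* The replenish thresholds programmed below come from two limits: half
 * of the chip's on-board BD cache (bdcache_maxcnt / 2) and one eighth
 * of the host ring size (rx_pending / 8, but at least 1).  The smaller
 * of the two is written to the RCVBDI threshold register.
 */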
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 57765_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
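/* calc_crc() below computes the standard Ethernet CRC-32.  It is used
 * to hash multicast addresses into the 128-bit MAC_HASH_REG_0..3
 * filter: the low 7 bits of the inverted CRC select one of 128
 * hash-bucket bits (see __tg3_set_rx_mode()).
 */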
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg;
	u32 tmp;
	int j, k;

	reg = 0xffffffff;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;

			reg >>= 1;

			if (tmp)
				reg ^= 0xedb88320;
		}
	}

	return ~reg;
}
static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
	/* accept or reject all multicast frames */
	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
{
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
}
static void tg3_rss_check_indir_tbl(struct tg3 *tp)
{
	int i;

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return;

	if (tp->rxq_cnt == 1) {
		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
		return;
	}

	/* Validate table against current IRQ count */
	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
			break;
	}

	if (i != TG3_RSS_INDIR_TBL_SIZE)
		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
}
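/* Each 32-bit MAC_RSS_INDIR_TBL register packs eight indirection-table
 * entries at four bits apiece, most-significant entry first; the
 * nibble-shifting loop below builds each register value accordingly.
 */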
static void tg3_rss_write_indir_tbl(struct tg3 *tp)
{
	int i = 0;
	u32 reg = MAC_RSS_INDIR_TBL_0;

	while (i < TG3_RSS_INDIR_TBL_SIZE) {
		u32 val = tp->rss_ind_tbl[i];
		i++;
		for (; i % 8; i++) {
			val <<= 4;
			val |= tp->rss_ind_tbl[i];
		}
		tw32(reg, val);
		reg += 4;
	}
}
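/* The 5719 and 5720 place the LSO read-DMA TX-length workaround enable
 * at different bit positions; this helper hides that difference for the
 * callers in tg3_reset_hw() and tg3_periodic_fetch_stats().
 */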
static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
{
	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
	else
		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
}
/* tp->lock is held. */
static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
{
	u32 val, rdmac_mode;
	int i, err, limit;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tg3_disable_ints(tp);

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);

	if (tg3_flag(tp, INIT_COMPLETE))
		tg3_abort_hw(tp, 1);

	if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
		tg3_phy_pull_config(tp);
		tg3_eee_pull_config(tp, NULL);
		tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
	}

	/* Enable MAC control of LPI */
	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
		tg3_setup_eee(tp);

	if (reset_phy)
		tg3_phy_reset(tp);

	err = tg3_chip_reset(tp);
	if (err)
		return err;

	tg3_write_sig_legacy(tp, RESET_KIND_INIT);

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		val = tr32(TG3_CPMU_CTRL);
		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
		tw32(TG3_CPMU_CTRL, val);

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);

		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
		val |= CPMU_LNK_AWARE_MACCLK_6_25;
		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);

		val = tr32(TG3_CPMU_HST_ACC);
		val &= ~CPMU_HST_ACC_MACCLK_MASK;
		val |= CPMU_HST_ACC_MACCLK_6_25;
		tw32(TG3_CPMU_HST_ACC, val);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
		       PCIE_PWR_MGMT_L1_THRESH_4MS;
		tw32(PCIE_PWR_MGMT_THRESH, val);

		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);

		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);

		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	if (tg3_flag(tp, L1PLLPD_EN)) {
		u32 grc_mode = tr32(GRC_MODE);

		/* Access the lower 1K of PL PCIE block registers. */
		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);

		tw32(GRC_MODE, grc_mode);
	}

	if (tg3_flag(tp, 57765_CLASS)) {
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of PL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_PL_LO_PHYCTL5);
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);

			tw32(GRC_MODE, grc_mode);
		}

		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
			u32 grc_mode;

			/* Fix transmit hangs */
			val = tr32(TG3_CPMU_PADRNG_CTL);
			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
			tw32(TG3_CPMU_PADRNG_CTL, val);

			grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of DL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_DL_LO_FTSMAX);
			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);

			tw32(GRC_MODE, grc_mode);
		}

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
	}

	/* This works around an issue with Athlon chipsets on
	 * B3 tigon3 silicon.  This bit has no effect on any
	 * other revision.  But do not set this on PCI Express
	 * chips and don't even touch the clocks if the CPMU is present.
	 */
	if (!tg3_flag(tp, CPMU_PRESENT)) {
		if (!tg3_flag(tp, PCI_EXPRESS))
			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE)) {
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_RETRY_SAME_DMA;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
		/* Enable some hw fixes. */
		val = tr32(TG3PCI_MSI_DATA);
		val |= (1 << 26) | (1 << 28) | (1 << 29);
		tw32(TG3PCI_MSI_DATA, val);
	}

	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to setup the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
	err = tg3_init_rings(tp);
	if (err)
		return err;

	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3PCI_DMA_RW_CTRL) &
		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
		if (!tg3_flag(tp, 57765_CLASS) &&
		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
		    tg3_asic_rev(tp) != ASIC_REV_5762)
			val |= DMA_RWCTRL_TAGGED_STAT_WA;
		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
		   tg3_asic_rev(tp) != ASIC_REV_5761) {
		/* This value is determined during the probe time DMA
		 * engine test, tg3_test_dma.
		 */
		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
			  GRC_MODE_4X_NIC_SEND_RINGS |
			  GRC_MODE_NO_TX_PHDR_CSUM |
			  GRC_MODE_NO_RX_PHDR_CSUM);
	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;

	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processers, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;

	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
	if (tp->rxptpctl)
		tw32(TG3_RX_PTP_CTL,
		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

	if (tg3_flag(tp, PTP_CAPABLE))
		val |= GRC_MODE_TIME_SYNC_ENABLE;

	tw32(GRC_MODE, tp->grc_mode | val);
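	/* A prescaler value of 65 divides the 66 MHz core clock by
	 * (65 + 1), i.e. down to a 1 MHz reference tick.
	 */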
	/* Setup the timer prescalar register.  Clock is always 66Mhz. */
	val = tr32(GRC_MISC_CFG);
	val &= ~0xff;
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
	tw32(GRC_MISC_CFG, val);

	/* Initialize MBUF/DESC pool. */
	if (tg3_flag(tp, 5750_PLUS)) {
		/* Do nothing. */
	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
		if (tg3_asic_rev(tp) == ASIC_REV_5704)
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
		else
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	} else if (tg3_flag(tp, TSO_CAPABLE)) {
		int fw_len;

		fw_len = tp->fw_len;
		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
		tw32(BUFMGR_MB_POOL_ADDR,
		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
		tw32(BUFMGR_MB_POOL_SIZE,
		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
	}

	if (tp->dev->mtu <= ETH_DATA_LEN) {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water);
	} else {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water_jumbo);
	}
	tw32(BUFMGR_DMA_LOW_WATER,
	     tp->bufmgr_config.dma_low_water);
	tw32(BUFMGR_DMA_HIGH_WATER,
	     tp->bufmgr_config.dma_high_water);

	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
	tw32(BUFMGR_MODE, val);
	for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
			break;
		udelay(10);
	}
	if (i >= 2000) {
		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
		return -ENODEV;
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);

	tg3_setup_rxbd_thresholds(tp);

	/* Initialize TG3_BDINFO's at:
	 *  RCVDBDI_STD_BD:	standard eth size rx ring
	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
	 *
	 * like so:
	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
	 *				ring attribute flags
	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
	 *
	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
	 *
	 * The size of each ring is fixed in the firmware, but the location is
	 * configurable.
	 */
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tpr->rx_std_mapping >> 32));
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tpr->rx_std_mapping & 0xffffffff));
	if (!tg3_flag(tp, 5717_PLUS))
		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
		     NIC_SRAM_RX_BUFFER_DESC);

	/* Disable the mini ring */
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
		     BDINFO_FLAGS_DISABLED);

	/* Program the jumbo buffer descriptor ring control
	 * blocks on those devices that have them.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {

		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
			     ((u64) tpr->rx_jmb_mapping >> 32));
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
			val = TG3_RX_JMB_RING_SIZE(tp) <<
			      BDINFO_FLAGS_MAXLEN_SHIFT;
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     val | BDINFO_FLAGS_USE_EXT_RECV);
			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
			    tg3_flag(tp, 57765_CLASS) ||
			    tg3_asic_rev(tp) == ASIC_REV_5762)
				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
		} else {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     BDINFO_FLAGS_DISABLED);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			val = TG3_RX_STD_RING_SIZE(tp);
			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
			val |= (TG3_RX_STD_DMA_SZ << 2);
		} else
			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
	} else
		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;

	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);

	tpr->rx_std_prod_idx = tp->rx_pending;
	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);

	tpr->rx_jmb_prod_idx =
		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);

	tg3_rings_reset(tp);

	/* Initialize MAC address and backoff seed. */
	__tg3_set_mac_addr(tp, false);

	/* MTU + ethernet header + FCS + optional VLAN tag */
	tw32(MAC_RX_MTU_SIZE,
	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

	/* The slot time is changed by tg3_setup_phy if we
	 * run at gigabit with half duplex.
	 */
	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT) |
	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	tw32(MAC_TX_LENGTHS, val);

	/* Receive rules. */
	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
	tw32(RCVLPC_CONFIG, 0x0181);

	/* Calculate RDMAC_MODE setting early, we need it to determine
	 * the RCVLPC_STATE_ENABLE mask.
	 */
	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
		      RDMAC_MODE_LNGREAD_ENAB);

	if (tg3_asic_rev(tp) == ASIC_REV_5717)
		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;

	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_asic_rev(tp) == ASIC_REV_5705) {
			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
		}
	}

	if (tg3_flag(tp, PCI_EXPRESS))
		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;

	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		tp->dma_limit = 0;
		if (tp->dev->mtu <= ETH_DATA_LEN) {
			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
		}
	}

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;

	if (tg3_flag(tp, 57765_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;

	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS)) {
		u32 tgtreg;

		if (tg3_asic_rev(tp) == ASIC_REV_5762)
			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
		else
			tgtreg = TG3_RDMA_RSRVCTRL_REG;

		val = tr32(tgtreg);
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
		    tg3_asic_rev(tp) == ASIC_REV_5762) {
			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
		}
		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 tgtreg;

		if (tg3_asic_rev(tp) == ASIC_REV_5762)
			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
		else
			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;

		val = tr32(tgtreg);
		tw32(tgtreg, val |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
	}

	/* Receive/send statistics. */
	if (tg3_flag(tp, 5750_PLUS)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_DACK_FIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
		   tg3_flag(tp, TSO_CAPABLE)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else {
		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
	}
	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
	tw32(SNDDATAI_STATSENAB, 0xffffff);
	tw32(SNDDATAI_STATSCTRL,
	     (SNDDATAI_SCTRL_ENABLE |
	      SNDDATAI_SCTRL_FASTUPD));

	/* Setup host coalescing engine. */
	tw32(HOSTCC_MODE, 0);
	for (i = 0; i < 2000; i++) {
		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
			break;
		udelay(10);
	}

	__tg3_set_coalesce(tp, &tp->coal);

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Status/statistics block address.  See tg3_timer,
		 * the tg3_periodic_fetch_stats call there, and
		 * tg3_get_stats to see how this works for 5705/5750 chips.
		 */
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
		     ((u64) tp->stats_mapping >> 32));
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
		     ((u64) tp->stats_mapping & 0xffffffff));
		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);

		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);

		/* Clear statistics and status block memory areas */
		for (i = NIC_SRAM_STATS_BLK;
		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
		     i += sizeof(u32)) {
			tg3_write_mem(tp, i, 0);
			udelay(40);
		}
	}

	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);

	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);

	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		/* reset to prevent losing 1st rx packet intermittently */
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}

	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
			MAC_MODE_FHDE_ENABLE;
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	if (!tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    tg3_asic_rev(tp) != ASIC_REV_5700)
		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
	udelay(40);

	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
	 * If TG3_FLAG_IS_NIC is zero, we should read the
	 * register to preserve the GPIO settings for LOMs. The GPIOs,
	 * whether used as inputs or outputs, are set by boot code after
	 * reset.
	 */
	if (!tg3_flag(tp, IS_NIC)) {
		u32 gpio_mask;

		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
				     GRC_LCLCTRL_GPIO_OUTPUT3;

		if (tg3_asic_rev(tp) == ASIC_REV_5755)
			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;

		tp->grc_local_ctrl &= ~gpio_mask;
		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;

		/* GPIO1 must be driven high for eeprom write protect */
		if (tg3_flag(tp, EEPROM_WRITE_PROT))
			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
					       GRC_LCLCTRL_GPIO_OUTPUT1);
	}
	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	udelay(100);

	if (tg3_flag(tp, USING_MSIX)) {
		val = tr32(MSGINT_MODE);
		val |= MSGINT_MODE_ENABLE;
		if (tp->irq_cnt > 1)
			val |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	if (!tg3_flag(tp, 5705_PLUS)) {
		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
		udelay(40);
	}

	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
	       WDMAC_MODE_LNGREAD_ENAB);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
			/* nothing */
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			val |= WDMAC_MODE_RX_ACCEL;
		}
	}

	/* Enable host coalescing bug fix */
	if (tg3_flag(tp, 5755_PLUS))
		val |= WDMAC_MODE_STATUS_TAG_FIX;

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		val |= WDMAC_MODE_BURST_ALL_DATA;

	tw32_f(WDMAC_MODE, val);
	udelay(40);

	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		if (tg3_asic_rev(tp) == ASIC_REV_5703) {
			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
			pcix_cmd |= PCI_X_CMD_READ_2K;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
			pcix_cmd |= PCI_X_CMD_READ_2K;
		}
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	tw32_f(RDMAC_MODE, rdmac_mode);
	udelay(40);

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
				break;
		}
		if (i < TG3_NUM_RDMA_CHANNELS) {
			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
			val |= tg3_lso_rd_dma_workaround_bit(tp);
			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
			tg3_flag_set(tp, 5719_5720_RDMA_BUG);
		}
	}

	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		tw32(SNDDATAC_MODE,
		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
	else
		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);

	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		val |= RCVDBDI_MODE_LRG_RING_SZ;
	tw32(RCVDBDI_MODE, val);
	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
	if (tg3_flag(tp, ENABLE_TSS))
		val |= SNDBDI_MODE_MULTI_TXQ_EN;
	tw32(SNDBDI_MODE, val);
	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
		err = tg3_load_5701_a0_firmware_fix(tp);
		if (err)
			return err;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		/* Ignore any errors for the firmware download. If download
		 * fails, the device will operate with EEE disabled
		 */
		tg3_load_57766_firmware(tp);
	}

	if (tg3_flag(tp, TSO_CAPABLE)) {
		err = tg3_load_tso_firmware(tp);
		if (err)
			return err;
	}

	tp->tx_mode = TX_MODE_ENABLE;

	if (tg3_flag(tp, 5755_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5906)
		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762) {
		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
		tp->tx_mode &= ~val;
		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
	}

	tw32_f(MAC_TX_MODE, tp->tx_mode);
	udelay(100);

	if (tg3_flag(tp, ENABLE_RSS)) {
		tg3_rss_write_indir_tbl(tp);
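		/* Ten 32-bit key registers follow, forming a 40-byte
		 * hash key - the conventional key size for
		 * Toeplitz-style RSS hashing.
		 */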
		/* Setup the "secret" hash key. */
		tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
		tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
		tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
		tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
		tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
		tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
		tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
		tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
		tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
		tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
	}

	tp->rx_mode = RX_MODE_ENABLE;
	if (tg3_flag(tp, 5755_PLUS))
		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;

	if (tg3_asic_rev(tp) == ASIC_REV_5762)
		tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;

	if (tg3_flag(tp, ENABLE_RSS))
		tp->rx_mode |= RX_MODE_RSS_ENABLE |
			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
			       RX_MODE_RSS_IPV6_HASH_EN |
			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
			       RX_MODE_RSS_IPV4_HASH_EN |
			       RX_MODE_RSS_TCP_IPV4_HASH_EN;

	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	tw32(MAC_LED_CTRL, tp->led_ctrl);

	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
			/* Set drive transmission level to 1.2V */
			/* only if the signal pre-emphasis bit is not set */
			val = tr32(MAC_SERDES_CFG);
			val &= 0xfffff000;
			val |= 0x880;
			tw32(MAC_SERDES_CFG, val);
		}
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
			tw32(MAC_SERDES_CFG, 0x616000);
	}

	/* Prevent chip from dropping frames when flow control
	 * is enabled.
	 */
	if (tg3_flag(tp, 57765_CLASS))
		val = 1;
	else
		val = 2;
	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);

	if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		/* Use hardware link auto-negotiation */
		tg3_flag_set(tp, HW_AUTONEG);
	}

	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_asic_rev(tp) == ASIC_REV_5714) {
		u32 tmp;

		tmp = tr32(SERDES_RX_CTRL);
		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	}

	if (!tg3_flag(tp, USE_PHYLIB)) {
		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;

		err = tg3_setup_phy(tp, false);
		if (err)
			return err;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			u32 tmp;

			/* Clear CRC stats. */
			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
				tg3_writephy(tp, MII_TG3_TEST1,
					     tmp | MII_TG3_TEST1_CRC_EN);
				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
			}
		}
	}

	__tg3_set_rx_mode(tp->dev);

	/* Initialize receive rules. */
	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);

	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
		limit = 8;
	else
		limit = 16;
	if (tg3_flag(tp, ENABLE_ASF))
		limit -= 4;
	switch (limit) {
	case 16:
		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
	case 15:
		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
	case 14:
		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
	case 13:
		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
	case 12:
		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
	case 11:
		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
	case 10:
		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
	case 9:
		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
	case 8:
		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
	case 7:
		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
	case 6:
		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
	case 5:
		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
	case 4:
		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
	case 3:
		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
	case 2:
	case 1:
	default:
		break;
	}

	if (tg3_flag(tp, ENABLE_APE))
		/* Write our heartbeat update interval to APE. */
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
				APE_HOST_HEARTBEAT_INT_DISABLE);

	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);

	return 0;
}
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 */
static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
{
	/* Chip may have been just powered on. If so, the boot code may still
	 * be running initialization. Wait for it to finish to avoid races in
	 * accessing the hardware.
	 */
	tg3_enable_register_access(tp);
	tg3_poll_fw(tp);

	tg3_switch_clocks(tp);

	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
{
	int i;

	for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
		u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;

		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);

		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
			memset(ocir, 0, TG3_OCIR_LEN);
	}
}
/* sysfs attributes for hwmon */
static ssize_t tg3_show_temp(struct device *dev,
			     struct device_attribute *devattr, char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct tg3 *tp = dev_get_drvdata(dev);
	u32 temperature;

	spin_lock_bh(&tp->lock);
	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
				sizeof(temperature));
	spin_unlock_bh(&tp->lock);
	return sprintf(buf, "%u\n", temperature);
}


static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_SENSOR_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_CAUTION_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_MAX_OFFSET);
static struct attribute *tg3_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(tg3);
static void tg3_hwmon_close(struct tg3 *tp)
{
	if (tp->hwmon_dev) {
		hwmon_device_unregister(tp->hwmon_dev);
		tp->hwmon_dev = NULL;
	}
}
static void tg3_hwmon_open(struct tg3 *tp)
{
	int i;
	u32 size = 0;
	struct pci_dev *pdev = tp->pdev;
	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];

	tg3_sd_scan_scratchpad(tp, ocirs);

	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
		if (!ocirs[i].src_data_length)
			continue;

		size += ocirs[i].src_hdr_length;
		size += ocirs[i].src_data_length;
	}

	if (!size)
		return;

	tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
							  tp, tg3_groups);
	if (IS_ERR(tp->hwmon_dev)) {
		tp->hwmon_dev = NULL;
		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
	}
}
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!tp->link_up)
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
		u32 val;

		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
	}

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    tg3_asic_rev(tp) != ASIC_REV_5762 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
static void tg3_chk_missed_msi(struct tg3 *tp)
{
	u32 i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_has_work(tnapi)) {
			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
			    tnapi->last_tx_cons == tnapi->tx_cons) {
				if (tnapi->chk_msi_cnt < 1) {
					tnapi->chk_msi_cnt++;
					return;
				}
				tg3_msi(0, tnapi);
			}
		}
		tnapi->chk_msi_cnt = 0;
		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
		tnapi->last_tx_cons = tnapi->tx_cons;
	}
}
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
		goto restart_timer;

	spin_lock(&tp->lock);

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		/* BCM4785: Flush posted writes from GbE to host memory. */
		tr32(HOSTCC_MODE);
	}

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, false);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (tp->link_up &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!tp->link_up &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					       (tp->mac_mode &
						~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, false);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
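/* With tagged interrupt status the timer only needs to run once per
 * second; without it, and on the chips that need the missed-MSI check
 * above, it polls ten times per second (HZ / 10), as set up below.
 */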
static void tg3_timer_init(struct tg3 *tp)
{
	if (tg3_flag(tp, TAGGED_STATUS) &&
	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    !tg3_flag(tp, 57765_CLASS))
		tp->timer_offset = HZ;
	else
		tp->timer_offset = HZ / 10;

	BUG_ON(tp->timer_offset > HZ);

	tp->timer_multiplier = (HZ / tp->timer_offset);
	tp->asf_multiplier = (HZ / tp->timer_offset) *
			     TG3_FW_UPDATE_FREQ_SEC;

	init_timer(&tp->timer);
	tp->timer.data = (unsigned long) tp;
	tp->timer.function = tg3_timer;
}
static void tg3_timer_start(struct tg3 *tp)
{
	tp->asf_counter   = tp->asf_multiplier;
	tp->timer_counter = tp->timer_multiplier;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
static void tg3_timer_stop(struct tg3 *tp)
{
	del_timer_sync(&tp->timer);
}
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		tg3_timer_stop(tp);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, true);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
static int tg3_request_irq(struct tg3 *tp, int irq_num)
{
	irq_handler_t fn;
	unsigned long flags;
	char *name;
	struct tg3_napi *tnapi = &tp->napi[irq_num];

	if (tp->irq_cnt == 1)
		name = tp->dev->name;
	else {
		name = &tnapi->irq_lbl[0];
		if (tnapi->tx_buffers && tnapi->rx_rcb)
			snprintf(name, IFNAMSIZ,
				 "%s-txrx-%d", tp->dev->name, irq_num);
		else if (tnapi->tx_buffers)
			snprintf(name, IFNAMSIZ,
				 "%s-tx-%d", tp->dev->name, irq_num);
		else if (tnapi->rx_rcb)
			snprintf(name, IFNAMSIZ,
				 "%s-rx-%d", tp->dev->name, irq_num);
		else
			snprintf(name, IFNAMSIZ,
				 "%s-%d", tp->dev->name, irq_num);
		name[IFNAMSIZ-1] = 0;
	}

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		fn = tg3_msi;
		if (tg3_flag(tp, 1SHOT_MSI))
			fn = tg3_msi_1shot;
		flags = 0;
	} else {
		fn = tg3_interrupt;
		if (tg3_flag(tp, TAGGED_STATUS))
			fn = tg3_interrupt_tagged;
		flags = IRQF_SHARED;
	}

	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
}
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, true);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
static int tg3_request_firmware(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;

	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
			   tp->fw_needed);
		return -ENOENT;
	}

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course
	 */

	tp->fw_len = be32_to_cpu(fw_hdr->len);	/* includes bss */
	if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
			   tp->fw_len, tp->fw_needed);
		release_firmware(tp->fw);
		tp->fw = NULL;
		return -EINVAL;
	}

	/* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
	return 0;
}
static u32 tg3_irq_count(struct tg3 *tp)
{
	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);

	if (irq_cnt > 1) {
		/* We want as many rx rings enabled as there are cpus.
		 * In multiqueue MSI-X mode, the first MSI-X vector
		 * only deals with link interrupts, etc, so we add
		 * one to the number of vectors we are requesting.
		 */
		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
	}

	return irq_cnt;
}
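/* Note on the retry logic below: pci_enable_msix() returns a positive
 * value to report how many vectors are actually available; the request
 * is then retried with that smaller count and the queue counts are
 * scaled down to match.
 */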
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];

	tp->txq_cnt = tp->txq_req;
	tp->rxq_cnt = tp->rxq_req;
	if (!tp->rxq_cnt)
		tp->rxq_cnt = netif_get_num_default_rss_queues();
	if (tp->rxq_cnt > tp->rxq_max)
		tp->rxq_cnt = tp->rxq_max;

	/* Disable multiple TX rings by default.  Simple round-robin hardware
	 * scheduling of the TX rings can cause starvation of rings with
	 * small packets when other rings have TSO or jumbo packets.
	 */
	if (!tp->txq_req)
		tp->txq_cnt = 1;

	tp->irq_cnt = tg3_irq_count(tp);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
		tp->rxq_cnt = max(rc - 1, 1);
		if (tp->txq_cnt)
			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	if (tp->irq_cnt == 1)
		return true;

	tg3_flag_set(tp, ENABLE_RSS);

	if (tp->txq_cnt > 1)
		tg3_flag_set(tp, ENABLE_TSS);

	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);

	return true;
}
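/* Note on the retry above (a sketch of the assumed contract of the
 * legacy pci_enable_msix() API): it returns 0 on success, a negative
 * errno on hard failure, and a positive count when fewer vectors are
 * available than requested.  The second call retries with that smaller
 * count, so e.g. a request for 5 vectors on a platform that can grant
 * only 3 ends up with irq_cnt = 3 and rxq_cnt = max(3 - 1, 1) = 2,
 * leaving vector 0 for link interrupts.
 */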
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
	}

	if (tp->irq_cnt == 1) {
		tp->txq_cnt = 1;
		tp->rxq_cnt = 1;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
static void tg3_ints_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, USING_MSIX))
		pci_disable_msix(tp->pdev);
	else if (tg3_flag(tp, USING_MSI))
		pci_disable_msi(tp->pdev);
	tg3_flag_clear(tp, USING_MSI);
	tg3_flag_clear(tp, USING_MSIX);
	tg3_flag_clear(tp, ENABLE_RSS);
	tg3_flag_clear(tp, ENABLE_TSS);
}
static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
		     bool init)
{
	struct net_device *dev = tp->dev;
	int i, err;

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto out_ints_fini;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			for (i--; i >= 0; i--) {
				tnapi = &tp->napi[i];
				free_irq(tnapi->irq_vec, tnapi);
			}
			goto out_napi_fini;
		}
	}

	tg3_full_lock(tp, 0);

	if (init)
		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	}

	tg3_full_unlock(tp);

	if (err)
		goto out_free_irq;

	if (test_irq && tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto out_napi_fini;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_hwmon_open(tp);

	tg3_full_lock(tp, 0);

	tg3_timer_start(tp);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	if (init)
		tg3_ptp_init(tp);
	else
		tg3_ptp_resume(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

out_free_irq:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

out_napi_fini:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);

out_ints_fini:
	tg3_ints_fini(tp);

	tg3_free_consistent(tp);

	return err;
}
static void tg3_stop(struct tg3 *tp)
{
	int i;

	tg3_reset_task_cancel(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_hwmon_close(tp);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);
}
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
			if (err) {
				netdev_warn(tp->dev, "EEE capability disabled\n");
				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
				netdev_warn(tp->dev, "EEE capability restored\n");
				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
			}
		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
			if (err)
				return err;
		} else if (err) {
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	tg3_carrier_off(tp);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	err = tg3_start(tp,
			!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
			true, true);
	if (err) {
		tg3_frob_aux_power(tp, false);
		pci_set_power_state(tp->pdev, PCI_D3hot);
	}

	if (tg3_flag(tp, PTP_CAPABLE)) {
		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
						   &tp->pdev->dev);
		if (IS_ERR(tp->ptp_clock))
			tp->ptp_clock = NULL;
	}

	return err;
}
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_ptp_fini(tp);

	tg3_stop(tp);

	/* Clear stats across close / open calls */
	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));

	if (pci_device_is_present(tp->pdev)) {
		tg3_power_down_prepare(tp);

		tg3_carrier_off(tp);
	}
	return 0;
}
static inline u64 get_stat64(tg3_stat64_t *val)
{
	return ((u64)val->high << 32) | ((u64)val->low);
}
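/* Example (editorial annotation): a hardware counter with
 * val->high = 0x1 and val->low = 0x2 is returned as
 * 0x0000000100000002, i.e. the two 32-bit register halves
 * concatenated into a single 64-bit value.
 */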
static u64 tg3_calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
		u32 val;

		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
	get_stat64(&hw_stats->member)
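/* Illustrative expansion (not in the original source):
 * ESTAT_ADD(rx_octets) becomes
 *
 *	estats->rx_octets = old_estats->rx_octets +
 *			    get_stat64(&hw_stats->rx_octets);
 *
 * i.e. each ethtool counter is the snapshot saved across the last
 * close/open cycle plus the live hardware counter.
 */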
static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
{
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);
}
static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	stats->rx_crc_errors = old_stats->rx_crc_errors +
		tg3_calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	stats->rx_dropped = tp->rx_dropped;
	stats->tx_dropped = tp->tx_dropped;
}
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}
static void tg3_get_regs(struct net_device *dev,
			 struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 i, offset, len, b_offset, b_count;
	__be32 val;
	u8 *pd;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
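/* Worked example of the alignment handling above (hypothetical values):
 * for offset = 5 and len = 10, b_offset = 1 and b_count = 3, so bytes
 * 5..7 are copied out of the aligned word at offset 4; the middle loop
 * then reads the single fully-aligned word at offset 8; finally
 * len & 3 = 3 leftover bytes (12..14) come out of the word at
 * offset 12.  The three phases together return exactly bytes 5..14.
 */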
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start, end;

	if (tg3_flag(tp, NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
		return phy_ethtool_gset(phydev, cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				   SUPPORTED_100baseT_Full |
				   SUPPORTED_10baseT_Half |
				   SUPPORTED_10baseT_Full |
				   SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				cmd->advertising |= ADVERTISED_Pause;
			} else {
				cmd->advertising |= ADVERTISED_Pause |
						    ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			cmd->advertising |= ADVERTISED_Asym_Pause;
		}
	}
	if (netif_running(dev) && tp->link_up) {
		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
		cmd->duplex = tp->link_config.active_duplex;
		cmd->lp_advertising = tp->link_config.rmt_adv;
		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
				cmd->eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->eth_tp_mdix = ETH_TP_MDI;
		}
	} else {
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
		cmd->duplex = DUPLEX_UNKNOWN;
		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
	}
	cmd->phy_address = tp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
		return phy_ethtool_sset(phydev, cmd);
	}

	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    cmd->duplex != DUPLEX_FULL &&
	    cmd->duplex != DUPLEX_HALF)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		if (cmd->advertising & ~mask)
			return -EINVAL;

		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		cmd->advertising &= mask;
	} else {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_UNKNOWN;
		tp->link_config.duplex = DUPLEX_UNKNOWN;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->duplex;
	}

	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;

	tg3_warn_mgmt_link_flap(tp);

	if (netif_running(dev))
		tg3_setup_phy(tp, true);

	tg3_full_unlock(tp);

	return 0;
}
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
}
static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
		wol->supported = WAKE_MAGIC;
	else
		wol->supported = 0;
	wol->wolopts = 0;
	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
		wol->wolopts = WAKE_MAGIC;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}
static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);
	struct device *dp = &tp->pdev->dev;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
		return -EINVAL;

	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);

	if (device_may_wakeup(dp))
		tg3_flag_set(tp, WOL_ENABLE);
	else
		tg3_flag_clear(tp, WOL_ENABLE);

	return 0;
}
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	tg3_warn_mgmt_link_flap(tp);

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[tp->phy_addr]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);

	ering->rx_max_pending = tp->rx_std_ring_mask;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
	else
		ering->rx_jumbo_max_pending = 0;

	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;

	ering->rx_pending = tp->rx_pending;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
	else
		ering->rx_jumbo_pending = 0;

	ering->tx_pending = tp->napi[0].tx_pending;
}
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;

	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, false);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);

	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);

	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
		epause->rx_pause = 1;
	else
		epause->rx_pause = 0;

	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
		epause->tx_pause = 1;
	else
		epause->tx_pause = 0;
}
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE)
		tg3_warn_mgmt_link_flap(tp);

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[tp->phy_addr];

		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			tp->link_config.advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, false);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;

	return err;
}
static int tg3_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return TG3_NUM_TEST;
	case ETH_SS_STATS:
		return TG3_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}
static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
			 u32 *rules __always_unused)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return -EOPNOTSUPP;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		if (netif_running(tp->dev))
			info->data = tp->rxq_cnt;
		else {
			info->data = num_online_cpus();
			if (info->data > TG3_RSS_MAX_NUM_QS)
				info->data = TG3_RSS_MAX_NUM_QS;
		}

		/* The first interrupt vector only
		 * handles link interrupts.
		 */
		info->data -= 1;
		return 0;

	default:
		return -EOPNOTSUPP;
	}
}
static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
{
	u32 size = 0;
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, SUPPORT_MSIX))
		size = TG3_RSS_INDIR_TBL_SIZE;

	return size;
}
static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		indir[i] = tp->rss_ind_tbl[i];

	return 0;
}
static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
{
	struct tg3 *tp = netdev_priv(dev);
	size_t i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = indir[i];

	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
		return 0;

	/* It is legal to write the indirection
	 * table while the device is running.
	 */
	tg3_full_lock(tp, 0);
	tg3_rss_write_indir_tbl(tp);
	tg3_full_unlock(tp);

	return 0;
}
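/* Example semantics (editorial annotation): rss_ind_tbl[i] names the rx
 * queue that receives packets whose RSS hash falls into bucket i, so an
 * ethtool write of { 0, 1, 0, 1, ... } alternates hash buckets between
 * queues 0 and 1 the next time the table is flushed to the hardware
 * indirection registers.
 */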
static void tg3_get_channels(struct net_device *dev,
			     struct ethtool_channels *channel)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 deflt_qs = netif_get_num_default_rss_queues();

	channel->max_rx = tp->rxq_max;
	channel->max_tx = tp->txq_max;

	if (netif_running(dev)) {
		channel->rx_count = tp->rxq_cnt;
		channel->tx_count = tp->txq_cnt;
	} else {
		if (tp->rxq_req)
			channel->rx_count = tp->rxq_req;
		else
			channel->rx_count = min(deflt_qs, tp->rxq_max);

		if (tp->txq_req)
			channel->tx_count = tp->txq_req;
		else
			channel->tx_count = min(deflt_qs, tp->txq_max);
	}
}
static int tg3_set_channels(struct net_device *dev,
			    struct ethtool_channels *channel)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return -EOPNOTSUPP;

	if (channel->rx_count > tp->rxq_max ||
	    channel->tx_count > tp->txq_max)
		return -EINVAL;

	tp->rxq_req = channel->rx_count;
	tp->txq_req = channel->tx_count;

	if (!netif_running(dev))
		return 0;

	tg3_stop(tp);

	tg3_carrier_off(tp);

	tg3_start(tp, true, false, false);

	return 0;
}
static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	case ETH_SS_TEST:
		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
		break;
	default:
		WARN_ON(1);	/* we need a WARN() */
		break;
	}
}
static int tg3_set_phys_id(struct net_device *dev,
			   enum ethtool_phys_id_state state)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(tp->dev))
		return -EAGAIN;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_1000MBPS_ON |
		     LED_CTRL_100MBPS_ON |
		     LED_CTRL_10MBPS_ON |
		     LED_CTRL_TRAFFIC_OVERRIDE |
		     LED_CTRL_TRAFFIC_BLINK |
		     LED_CTRL_TRAFFIC_LED);
		break;

	case ETHTOOL_ID_OFF:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_TRAFFIC_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		tw32(MAC_LED_CTRL, tp->led_ctrl);
		break;
	}

	return 0;
}
static void tg3_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->hw_stats)
		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
	else
		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
}
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	if (!offset || !len) {
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}
#define NVRAM_TEST_SIZE			0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
#define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
#define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
#define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
#define NVRAM_SELFBOOT_HW_SIZE		0x20
#define NVRAM_SELFBOOT_DATA_SIZE	0x1c
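/* Background for the selfboot-HW parity check in tg3_test_nvram() below
 * (a summary with a made-up example): each stored byte carries an
 * odd-parity bit, so a data byte with an even number of ones
 * ((hweight8(data[i]) & 0x1) == 0) must have its saved parity bit set,
 * and vice versa.  E.g. data 0x03 (two one-bits) fails the test unless
 * its parity bit is non-zero.
 */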
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic, len;
	__be32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				return -EIO;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes. */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
			} else if (i == 16) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
			} else {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;

				data[j++] = buf8[i];
			}
		}

		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	buf = tg3_vpd_readblock(tp, &len);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}
#define TG3_SERDES_TIMEOUT_SEC	2
#define TG3_COPPER_TIMEOUT_SEC	6
static int tg3_test_link(struct tg3 *tp)
{
	int i, max;

	if (!netif_running(tp->dev))
		return -ENODEV;

	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
		max = TG3_SERDES_TIMEOUT_SEC;
	else
		max = TG3_COPPER_TIMEOUT_SEC;

	for (i = 0; i < max; i++) {
		if (tp->link_up)
			return 0;

		if (msleep_interruptible(1000))
			break;
	}

	return -EIO;
}
/* Only test the commonly used registers */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
	int i;
	u32 j;

	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
		for (j = 0; j < len; j += 4) {
			u32 val;

			tg3_write_mem(tp, offset + j, test_pattern[i]);
			tg3_read_mem(tp, offset + j, &val);
			if (val != test_pattern[i])
				return -EIO;
		}
	}

	return 0;
}
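/* The three patterns above are a common memory-test idiom (editorial
 * note): 0x00000000 and 0xffffffff catch bits stuck at 1 or 0
 * respectively, while 0xaa55a55a alternates neighbouring bits and
 * nibbles to expose shorts between adjacent cells.
 */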
static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5717[] = {
		{ 0x00000200, 0x00008},
		{ 0x00010000, 0x0a000},
		{ 0x00020000, 0x13c00},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_57765[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x09800},
		{ 0x00010000, 0x0a000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	if (tg3_flag(tp, 5717_PLUS))
		mem_tbl = mem_tbl_5717;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5762)
		mem_tbl = mem_tbl_57765;
	else if (tg3_flag(tp, 5755_PLUS))
		mem_tbl = mem_tbl_5755;
	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tg3_flag(tp, 5705_PLUS))
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;
	}

	return err;
}
#define TG3_TSO_MSS		500

#define TG3_TSO_IP_HDR_LEN	20
#define TG3_TSO_TCP_HDR_LEN	20
#define TG3_TSO_TCP_OPT_LEN	12

static const u8 tg3_tso_header[] = {
0x08, 0x00,
0x45, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x00,
0x40, 0x06, 0x00, 0x00,
0x0a, 0x00, 0x00, 0x01,
0x0a, 0x00, 0x00, 0x02,
0x0d, 0x00, 0xe0, 0x00,
0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x02, 0x00,
0x80, 0x10, 0x10, 0x00,
0x14, 0x09, 0x00, 0x00,
0x01, 0x01, 0x08, 0x0a,
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};
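/* Decoding the canned frame above (editorial annotation): 0x08 0x00 is
 * the IPv4 EtherType, 0x45 starts an IPv4 header with IHL 5, protocol
 * 0x06 (TCP), src 10.0.0.1 and dst 10.0.0.2; the TCP header that
 * follows has data offset 8 (32 bytes) and carries a 12-byte timestamp
 * option (0x01 0x01 0x08 0x0a = NOP, NOP, kind 8, length 10), matching
 * TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN + TG3_TSO_TCP_OPT_LEN.
 */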
static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
{
	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
	u32 budget;
	struct sk_buff *skb;
	u8 *tx_data, *rx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;
	struct tg3_napi *tnapi, *rnapi;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tnapi = &tp->napi[0];
	rnapi = &tp->napi[0];
	if (tp->irq_cnt > 1) {
		if (tg3_flag(tp, ENABLE_RSS))
			rnapi = &tp->napi[1];
		if (tg3_flag(tp, ENABLE_TSS))
			tnapi = &tp->napi[1];
	}
	coal_now = tnapi->coal_now | rnapi->coal_now;

	err = -EIO;

	tx_len = pktsz;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
	memset(tx_data + ETH_ALEN, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

	if (tso_loopback) {
		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
			      TG3_TSO_TCP_OPT_LEN;

		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
		       sizeof(tg3_tso_header));
		mss = TG3_TSO_MSS;

		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);

		/* Set the total length field in the IP header */
		iph->tot_len = htons((u16)(mss + hdr_len));

		base_flags = (TXD_FLAG_CPU_PRE_DMA |
			      TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			struct tcphdr *th;
			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
			th = (struct tcphdr *)&tx_data[val];
			th->check = 0;
		} else
			base_flags |= TXD_FLAG_TCPUDP_CSUM;

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 tg3_asic_rev(tp) == ASIC_REV_5705) {
			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
		} else {
			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
		}

		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
	} else {
		num_pkts = 1;
		data_off = ETH_HLEN;

		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
		    tx_len > VLAN_ETH_FRAME_LEN)
			base_flags |= TXD_FLAG_JMB_PKT;
	}

	for (i = data_off; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	val = tnapi->tx_prod;
	tnapi->tx_buffers[val].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       rnapi->coal_now);

	udelay(10);

	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

	budget = tg3_tx_avail(tnapi);
	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
			    base_flags | TXD_FLAG_END, mss, 0)) {
		tnapi->tx_buffers[val].skb = NULL;
		dev_kfree_skb(skb);
		return -EIO;
	}

	tnapi->tx_prod++;

	/* Sync BD data before updating mailbox */
	wmb();

	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
	tr32_mailbox(tnapi->prodmbox);

	udelay(10);

	/* 350 usec to allow enough time on some 10/100 Mbps devices. */
	for (i = 0; i < 35; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       coal_now);

		udelay(10);

		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
		rx_idx = rnapi->hw_status->idx[0].rx_producer;
		if ((tx_idx == tnapi->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
	dev_kfree_skb(skb);

	if (tx_idx != tnapi->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	val = data_off;
	while (rx_idx != rx_start_idx) {
		desc = &rnapi->rx_rcb[rx_start_idx++];
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
			goto out;

		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
			 - ETH_FCS_LEN;

		if (!tso_loopback) {
			if (rx_len != tx_len)
				goto out;

			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
				if (opaque_key != RXD_OPAQUE_RING_STD)
					goto out;
			} else {
				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
					goto out;
			}
		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
			goto out;
		}

		if (opaque_key == RXD_OPAQUE_RING_STD) {
			rx_data = tpr->rx_std_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
					     mapping);
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
					     mapping);
		} else
			goto out;

		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
					    PCI_DMA_FROMDEVICE);

		rx_data += TG3_RX_OFFSET(tp);
		for (i = data_off; i < rx_len; i++, val++) {
			if (*(rx_data + i) != (u8) (val & 0xff))
				goto out;
		}
	}

	err = 0;

	/* tg3_free_rings will unmap and free the rx_data */
out:
	return err;
}
#define TG3_STD_LOOPBACK_FAILED		1
#define TG3_JMB_LOOPBACK_FAILED		2
#define TG3_TSO_LOOPBACK_FAILED		4
#define TG3_LOOPBACK_FAILED \
	(TG3_STD_LOOPBACK_FAILED | \
	 TG3_JMB_LOOPBACK_FAILED | \
	 TG3_TSO_LOOPBACK_FAILED)
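/* Usage example (illustrative): a run where the standard-MTU packet
 * passes but the jumbo and TSO variants fail leaves the test word set
 * to TG3_JMB_LOOPBACK_FAILED | TG3_TSO_LOOPBACK_FAILED = 6, which
 * ethtool then reports for the corresponding loopback entry.
 */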
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;
	u32 jmb_pkt_sz = 9000;

	if (tp->dma_limit)
		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;

	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	if (!netif_running(tp->dev)) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	err = tg3_reset_hw(tp, true);
	if (err) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* HW errata - mac loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by
	 * errata.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT)) {
		tg3_mac_loopback(tp, true);

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;

		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		tg3_mac_loopback(tp, false);
	}

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec.  Double it
			 * to be sure.
			 */
			mdelay(40);

			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_STD_LOOPBACK_FAILED;
			if (tg3_flag(tp, TSO_CAPABLE) &&
			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_TSO_LOOPBACK_FAILED;
			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}

	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;

done:
	tp->phy_flags |= eee_cap;

	return err;
}
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		if (tg3_power_up(tp)) {
			etest->flags |= ETH_TEST_FL_FAILED;
			memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
			return;
		}
		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_NVRAM_TEST] = 1;
	}
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_LINK_TEST] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_REGISTER_TEST] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_MEMORY_TEST] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		if (tg3_test_loopback(tp, data, doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_INTERRUPT_TEST] = 1;
		}

		tg3_full_lock(tp, 0);

		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, true);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down_prepare(tp);
}

static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct tg3 *tp = netdev_priv(dev);
	struct hwtstamp_config stmpconf;

	if (!tg3_flag(tp, PTP_CAPABLE))
		return -EOPNOTSUPP;

	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
		return -EFAULT;

	if (stmpconf.flags)
		return -EINVAL;

	if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
	    stmpconf.tx_type != HWTSTAMP_TX_OFF)
		return -ERANGE;

	switch (stmpconf.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tp->rxptpctl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	default:
		return -ERANGE;
	}

	if (netif_running(dev) && tp->rxptpctl)
		tw32(TG3_RX_PTP_CTL,
		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

	if (stmpconf.tx_type == HWTSTAMP_TX_ON)
		tg3_flag_set(tp, TX_TSTAMP_EN);
	else
		tg3_flag_clear(tp, TX_TSTAMP_EN);

	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
		-EFAULT : 0;
}
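
/* Illustrative sketch, not part of the original driver: user space reaches
 * tg3_hwtstamp_set() through the SIOCSHWTSTAMP ioctl.  A minimal caller
 * (interface name "eth0" assumed) looks roughly like:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { .ifr_data = (void *)&cfg };
 *	strcpy(ifr.ifr_name, "eth0");
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * The config copied back to ifr_data reports what was actually programmed,
 * which is how callers discover a driver's supported filter set.
 */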

static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct tg3 *tp = netdev_priv(dev);
	struct hwtstamp_config stmpconf;

	if (!tg3_flag(tp, PTP_CAPABLE))
		return -EOPNOTSUPP;

	stmpconf.flags = 0;
	stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
			    HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);

	switch (tp->rxptpctl) {
	case 0:
		stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
		break;
	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
		break;
	default:
		WARN_ON_ONCE(1);
		return -ERANGE;
	}

	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
		-EFAULT : 0;
}

static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = __tg3_readphy(tp, data->phy_id & 0x1f,
				    data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = __tg3_writephy(tp, data->phy_id & 0x1f,
				     data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	case SIOCSHWTSTAMP:
		return tg3_hwtstamp_set(dev, ifr);

	case SIOCGHWTSTAMP:
		return tg3_hwtstamp_get(dev, ifr);

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}

static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);

	memcpy(ec, &tp->coal, sizeof(*ec));
	return 0;
}

static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

	if (!tg3_flag(tp, 5705_PLUS)) {
		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	}

	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;

	/* No rx interrupts will be generated if both are zero */
	if ((ec->rx_coalesce_usecs == 0) &&
	    (ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	/* No tx interrupts will be generated if both are zero */
	if ((ec->tx_coalesce_usecs == 0) &&
	    (ec->tx_max_coalesced_frames == 0))
		return -EINVAL;

	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}
	return 0;
}
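
/* These fields map one-for-one onto the standard ethtool coalescing knobs;
 * for example a command such as (sketch, device name assumed)
 *
 *	ethtool -C eth0 rx-usecs 20 rx-frames 5
 *
 * arrives here as ec->rx_coalesce_usecs = 20 and
 * ec->rx_max_coalesced_frames = 5, and is accepted only if it passes the
 * per-chip bounds checks above.
 */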

static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
		netdev_warn(tp->dev, "Board does not support EEE!\n");
		return -EOPNOTSUPP;
	}

	if (edata->advertised != tp->eee.advertised) {
		netdev_warn(tp->dev,
			    "Direct manipulation of EEE advertisement is not supported\n");
		return -EINVAL;
	}

	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
		netdev_warn(tp->dev,
			    "Maximal Tx Lpi timer supported is %#x(u)\n",
			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
		return -EINVAL;
	}

	tp->eee = *edata;

	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
	tg3_warn_mgmt_link_flap(tp);

	if (netif_running(tp->dev)) {
		tg3_full_lock(tp, 0);
		tg3_setup_eee(tp);
		tg3_phy_reset(tp);
		tg3_full_unlock(tp);
	}

	return 0;
}

static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
		netdev_warn(tp->dev,
			    "Board does not support EEE!\n");
		return -EOPNOTSUPP;
	}

	*edata = tp->eee;
	return 0;
}

static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
	.get_rxfh_indir		= tg3_get_rxfh_indir,
	.set_rxfh_indir		= tg3_set_rxfh_indir,
	.get_channels		= tg3_get_channels,
	.set_channels		= tg3_set_channels,
	.get_ts_info		= tg3_get_ts_info,
	.get_eee		= tg3_get_eee,
	.set_eee		= tg3_set_eee,
};

static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);

	spin_lock_bh(&tp->lock);
	if (!tp->hw_stats) {
		spin_unlock_bh(&tp->lock);
		return &tp->net_stats_prev;
	}

	tg3_get_nstats(tp, stats);
	spin_unlock_bh(&tp->lock);

	return stats;
}

static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}

static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tg3_flag(tp, 5780_CLASS)) {
			netdev_update_features(dev);
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else {
			tg3_flag_set(tp, JUMBO_RING_ENABLE);
		}
	} else {
		if (tg3_flag(tp, 5780_CLASS)) {
			tg3_flag_set(tp, TSO_CAPABLE);
			netdev_update_features(dev);
		}
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
	}
}

static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;
	bool reset_phy = false;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	/* Reset PHY, otherwise the read DMA engine will be in a mode that
	 * breaks all requests to 256 bytes.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_57766)
		reset_phy = true;

	err = tg3_restart_hw(tp, reset_phy);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}

static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};

static void tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return;

	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}
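
/* Worked example of the sizing loop above (values illustrative): with the
 * signature at offset 0 on a 128 KB part, reads at 0x10, 0x20, 0x40, ...
 * return ordinary data until cursize reaches the part size; the device's
 * address lines wrap, the read sees the magic signature again, and
 * tp->nvram_size is set to that wrap point.
 */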

static void tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* This is confusing.  We want to operate on the
			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses.  This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU.  The 16-bit
			 * byteswap then brings the data to CPU endianness.
			 */
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
			return;
		}
	}
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}
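
/* Worked example of the swab16() above (illustrative): if the size word in
 * NVRAM holds 512 (KB), i.e. 0x0200 stored little-endian, the byteswapped
 * NVRAM read leaves 0x0002 in the low 16 bits; swab16(0x0002) == 0x0200
 * == 512, so nvram_size becomes 512 * 1024 bytes.
 */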

static void tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}

static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
{
	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5752PAGE_SIZE_512:
		tp->nvram_pagesize = 512;
		break;
	case FLASH_5752PAGE_SIZE_1K:
		tp->nvram_pagesize = 1024;
		break;
	case FLASH_5752PAGE_SIZE_2K:
		tp->nvram_pagesize = 2048;
		break;
	case FLASH_5752PAGE_SIZE_4K:
		tp->nvram_pagesize = 4096;
		break;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
		break;
	}
}

static void tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}

static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}

static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}

static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}

static void tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}

static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}

static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}

static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
			tg3_flag_set(tp, NO_NVRAM);
			return;
		}

		switch (nvmpinstrp) {
		case FLASH_5762_EEPROM_HD:
			nvmpinstrp = FLASH_5720_EEPROM_HD;
			break;
		case FLASH_5762_EEPROM_LD:
			nvmpinstrp = FLASH_5720_EEPROM_LD;
			break;
		case FLASH_5720VENDOR_M_ST_M45PE20:
			/* This pinstrap supports multiple sizes, so force it
			 * to read the actual size from location 0xf0.
			 */
			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
			break;
		}
	}

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 val;

		if (tg3_nvram_read(tp, 0, &val))
			return;

		if (val != TG3_EEPROM_MAGIC &&
		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
			tg3_flag_set(tp, NO_NVRAM);
	}
}

/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void tg3_nvram_init(struct tg3 *tp)
{
	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
			 tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			 tg3_asic_rev(tp) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
			 tg3_asic_rev(tp) == ASIC_REV_5762)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}

struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};

static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}
	return NULL;
}

static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
		u32 nic_phy_id, ver, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);

			if (tg3_flag(tp, 5717_PLUS) ||
			    tg3_asic_rev(tp) == ASIC_REV_5762)
				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
						LED_CTRL_BLINK_RATE_MASK;

			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);

			break;

		}

		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
			    !tg3_flag(tp, 57765_PLUS) &&
			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
				tg3_flag_set(tp, ASPM_WORKAROUND);
			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);

		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
	}
done:
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
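
/* Note (informational): WOL_CAP only records hardware capability; what the
 * device_set_wakeup_enable() calls above register with the PM core is the
 * current wake-up policy, which user space can change later, e.g. via
 * "ethtool -s <dev> wol g" landing in tg3_set_wol().
 */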

static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;

	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
	udelay(100);

	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
		udelay(10);
	}

	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}

static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}
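
/* The loop above polls at 10 us intervals, so 100 iterations bound the
 * wait at roughly 1 ms, matching the comment; a command that has not set
 * OTP_STATUS_CMD_DONE by then is reported as -EBUSY.
 */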

/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
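
/* Merge example for the return expression above (values illustrative): if
 * the first read yields thalf_otp = 0xAAAABBBB and the second yields
 * bhalf_otp = 0xCCCCDDDD, the result is (0xBBBB << 16) | 0xCCCC =
 * 0xBBBBCCCC, i.e. the low half of the first word glued to the high half
 * of the second.
 */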

static void tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
			adv |= ADVERTISED_1000baseT_Half;
		adv |= ADVERTISED_1000baseT_Full;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_UNKNOWN;
	tp->link_config.duplex = DUPLEX_UNKNOWN;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_UNKNOWN;
	tp->link_config.active_duplex = DUPLEX_UNKNOWN;

	tp->old_link = -1;
}

static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	if (!tg3_flag(tp, ENABLE_ASF) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (p) {
				tp->phy_id = p->phy_id;
			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* For now we saw the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to an BCM4785 and there are
				 * probably more. Just assume that the phy is
				 * supported when it is connected to a SSB core
				 * for now.
				 */
				return -ENODEV;
			}

			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

		tp->eee.supported = SUPPORTED_100baseT_Full |
				    SUPPORTED_1000baseT_Full;
		tp->eee.advertised = ADVERTISED_100baseT_Full |
				     ADVERTISED_1000baseT_Full;
		tp->eee.eee_enabled = 1;
		tp->eee.tx_lpi_enabled = 1;
		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
	}

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
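
/* Worked example of the ID packing above (register values illustrative):
 * with hw_phy_id_1 = 0x0020 and hw_phy_id_2 = 0x60c0,
 *
 *	(0x0020 & 0xffff) << 10  = 0x00008000
 *	(0x60c0 & 0xfc00) << 16  = 0x60000000
 *	(0x60c0 & 0x03ff) << 0   = 0x000000c0
 *
 * giving hw_phy_id = 0x600080c0; TG3_PHY_ID_MASK then strips the low
 * revision bits before the TG3_KNOWN_PHY_ID() lookup.
 */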

static void tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > vpdlen)
		goto out_not_found;

	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		if (len >= sizeof(tp->fw_ver))
			len = sizeof(tp->fw_ver) - 1;
		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
			 &vpd_data[j]);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}

static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    val != 0)
		return 0;

	return 1;
}

static void tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}

static void tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}

static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;

	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
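
/* Build-suffix example for the code above: build numbers 1..26 map to the
 * letters 'a'..'z' via 'a' + build - 1, so e.g. version 1.04 build 2
 * renders as "sb v1.04b"; build 0 adds no suffix, and anything past 26
 * was already rejected by the check above.
 */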

static void tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
static void tg3_probe_ncsi(struct tg3 *tp)
{
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
		tg3_flag_set(tp, APE_HAS_NCSI);
}
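/* Note the ordering of the APE checks above: the segment signature is
 * verified first, then the firmware-ready status, so the feature
 * register is only consulted once the APE firmware is known to be up.
 */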
static void tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_flag(tp, APE_HAS_NCSI))
		fwtype = "NCSI";
	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
		fwtype = "SMASH";
	else
		fwtype = "DASH";

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}
static void tg3_read_otp_ver(struct tg3 *tp)
{
	u32 val, val2;

	if (tg3_asic_rev(tp) != ASIC_REV_5762)
		return;

	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
	    TG3_OTP_MAGIC0_VALID(val)) {
		u64 val64 = (u64) val << 32 | val2;
		u32 ver = 0;
		int i, vlen;

		for (i = 0; i < 7; i++) {
			if ((val64 & 0xff) == 0)
				break;
			ver = val64 & 0xff;
			val64 >>= 8;
		}
		vlen = strlen(tp->fw_ver);
		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
	}
}
static void tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		tg3_read_otp_ver(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);

	if (tg3_flag(tp, ENABLE_ASF)) {
		if (tg3_flag(tp, ENABLE_APE)) {
			tg3_probe_ncsi(tp);
			if (!vpd_vers)
				tg3_read_dash_ver(tp);
		} else if (!vpd_vers) {
			tg3_read_mgmtfw_ver(tp);
		}
	}

	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
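/* The resulting tp->fw_ver is a concatenation of the pieces built
 * above; depending on the NVRAM contents it ends up looking something
 * like "v1.24" (bootcode only), "sb v1.02" (self-boot), or
 * "v1.24 NCSI v1.1.2.3" (bootcode plus APE firmware). The exact shape
 * varies by board; these are illustrative examples only.
 */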
static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}
static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
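/* This table feeds pci_dev_present() in tg3_get_invariants(); if any
 * of these host bridges is in the system, MBOX_WRITE_REORDER is set
 * and every mailbox write is read back to force in-order posting.
 */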
static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
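/* Example of the devfn math above: PCI encodes devfn as
 * (slot << 3) | func, so for devfn 0x09 (slot 1, function 1),
 * devnr = 0x09 & ~7 = 0x08 and the loop scans the eight functions
 * 0x08..0x0f of the same slot looking for the other port.
 */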
static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);

		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
	    tg3_asic_rev(tp) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}
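/* The *_PLUS flags set above form a hierarchy: 57765_PLUS implies
 * 5755_PLUS, which implies 5750_PLUS, which implies 5705_PLUS. Later
 * code relies on this chaining and usually tests only the weakest
 * flag that matters for a given workaround.
 */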
static bool tg3_10_100_only_device(struct tg3 *tp,
				   const struct pci_device_id *ent)
{
	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;

	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
		return true;

	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
				return true;
		} else {
			return true;
		}
	}

	return false;
}
static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
{
	u32 misc_ctrl_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err;

	/* Force memory write invalidate off. If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size. The Broadcom driver has this
	 * workaround but turns MWI off all the time, so it is never
	 * used. This seems to suggest that the workaround is insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* Important! -- Make sure register accesses are byteswapped
	 * correctly. Also, for those chips that require it, make
	 * sure that indirect register accesses are enabled before
	 * the first operation.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			      &misc_ctrl_reg);
	tp->misc_host_ctrl |= (misc_ctrl_reg &
			       MISC_HOST_CTRL_CHIPREV);
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	tg3_detect_asic_rev(tp, misc_ctrl_reg);
	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
	 * we need to disable memory and use config. cycles
	 * only to access all registers. The 5702/03 chips
	 * can mistakenly decode the special cycles from the
	 * ICH chipsets as memory write cycles, causing corruption
	 * of register and memory space. Only certain ICH bridges
	 * will drive special cycles with non-zero data during the
	 * address phase which can fall within the 5703's address
	 * range. This is not an ICH bug as the PCI spec allows
	 * non-zero address during special cycles. However, only
	 * these ICH bridges are known to drive non-zero addresses
	 * during special cycles.
	 *
	 * Since special cycles do not cross PCI bridges, we only
	 * enable this workaround if the 5703 is on the secondary
	 * bus of these ICH bridges.
	 */
	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
	    (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
			u32	rev;
		} ich_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
			  0xa },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
			  PCI_ANY_ID },
			{ },
		};
		struct tg3_dev_id *pci_id = &ich_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor, pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (pci_id->rev != PCI_ANY_ID) {
				if (bridge->revision > pci_id->rev)
					continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number ==
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, ICH_WORKAROUND);
				pci_dev_put(bridge);
				break;
			}
		}
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5701) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
		} bridge_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
			{ },
		};
		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor,
						pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->busn_res.end >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 5701_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		}
	}
	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
	 * DMA addresses > 40-bit. This bridge may have additional
	 * 57xx devices behind it in some 4-port NIC designs, for example.
	 * Any tg3 device found behind the bridge will also need the 40-bit
	 * DMA workaround.
	 */
	if (tg3_flag(tp, 5780_CLASS)) {
		tg3_flag_set(tp, 40BIT_DMA_BUG);
		tp->msi_cap = tp->pdev->msi_cap;
	} else {
		struct pci_dev *bridge = NULL;

		do {
			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
						PCI_DEVICE_ID_SERVERWORKS_EPB,
						bridge);
			if (bridge && bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->busn_res.end >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 40BIT_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		} while (bridge);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tp->pdev_peer = tg3_find_peer(tp);
	/* Determine TSO capabilities */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
		; /* Do nothing. HW bug. */
	else if (tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, HW_TSO_3);
	else if (tg3_flag(tp, 5755_PLUS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5906)
		tg3_flag_set(tp, HW_TSO_2);
	else if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, HW_TSO_1);
		tg3_flag_set(tp, TSO_BUG);
		if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
		    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
			tg3_flag_clear(tp, TSO_BUG);
	} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		   tg3_asic_rev(tp) != ASIC_REV_5701 &&
		   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		tg3_flag_set(tp, FW_TSO);
		tg3_flag_set(tp, TSO_BUG);
		if (tg3_asic_rev(tp) == ASIC_REV_5705)
			tp->fw_needed = FIRMWARE_TG3TSO5;
		else
			tp->fw_needed = FIRMWARE_TG3TSO;
	}

	/* Selectively allow TSO based on operating conditions */
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3) ||
	    tg3_flag(tp, FW_TSO)) {
		/* For firmware TSO, assume ASF is disabled.
		 * We'll disable TSO later if we discover ASF
		 * is enabled in tg3_get_eeprom_hw_cfg().
		 */
		tg3_flag_set(tp, TSO_CAPABLE);
	} else {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
		tp->fw_needed = FIRMWARE_TG3;

	if (tg3_asic_rev(tp) == ASIC_REV_57766)
		tp->fw_needed = FIRMWARE_TG357766;
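	/* To summarize the TSO selection above: 57765_PLUS parts get
	 * HW_TSO_3, 5755_PLUS and 5906 get HW_TSO_2, remaining
	 * 5750_PLUS parts get HW_TSO_1, and everything else new enough
	 * falls back to firmware TSO with the matching image requested.
	 */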
	tp->irq_max = 1;

	if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, SUPPORT_MSI);
		if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
		    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
		    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
		     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
		     tp->pdev_peer == tp->pdev))
			tg3_flag_clear(tp, SUPPORT_MSI);

		if (tg3_flag(tp, 5755_PLUS) ||
		    tg3_asic_rev(tp) == ASIC_REV_5906) {
			tg3_flag_set(tp, 1SHOT_MSI);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			tg3_flag_set(tp, SUPPORT_MSIX);
			tp->irq_max = TG3_IRQ_MAX_VECS;
		}
	}

	tp->txq_max = 1;
	tp->rxq_max = 1;
	if (tp->irq_max > 1) {
		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);

		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			tp->txq_max = tp->irq_max - 1;
	}

	if (tg3_flag(tp, 5755_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5906)
		tg3_flag_set(tp, SHORT_DMA_BUG);

	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, LRG_PROD_RING_CAP);

	if (tg3_flag(tp, 57765_PLUS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
		tg3_flag_set(tp, USE_JUMBO_BDFLAG);

	if (!tg3_flag(tp, 5705_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_flag(tp, USE_JUMBO_BDFLAG))
		tg3_flag_set(tp, JUMBO_CAPABLE);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);

	if (pci_is_pcie(tp->pdev)) {
		u16 lnkctl;

		tg3_flag_set(tp, PCI_EXPRESS);

		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
				tg3_flag_clear(tp, HW_TSO_2);
				tg3_flag_clear(tp, TSO_CAPABLE);
			}
			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
				tg3_flag_set(tp, CLKREQ_BUG);
		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
			tg3_flag_set(tp, L1PLLPD_EN);
		}
	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		/* BCM5785 devices are effectively PCIe devices, and should
		 * follow PCIe codepaths, but do not have a PCIe capabilities
		 * section.
		 */
		tg3_flag_set(tp, PCI_EXPRESS);
	} else if (!tg3_flag(tp, 5705_PLUS) ||
		   tg3_flag(tp, 5780_CLASS)) {
		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
		if (!tp->pcix_cap) {
			dev_err(&tp->pdev->dev,
				"Cannot find PCI-X capability, aborting\n");
			return -EIO;
		}

		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
			tg3_flag_set(tp, PCIX_MODE);
	}

	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles. We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
	if (pci_dev_present(tg3_write_reorder_chipsets) &&
	    !tg3_flag(tp, PCI_EXPRESS))
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
			     &tp->pci_cacheline_sz);
	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
			     &tp->pci_lat_timer);
	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Important! -- It is critical that the PCI-X hw workaround
	 * situation is decided before the first MMIO register access.
	 */
	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to workaround a bug.
		 */
		tg3_flag_set(tp, TXD_MBOX_HWBUG);

		/* If we are in PCI-X mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (tg3_flag(tp, PCIX_MODE)) {
			u32 pm_reg;

			tg3_flag_set(tp, PCIX_TARGET_HWBUG);

			/* The chip can have its power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev,
					      tp->pdev->pm_cap + PCI_PM_CTRL,
					      &pm_reg);
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev,
					       tp->pdev->pm_cap + PCI_PM_CTRL,
					       pm_reg);

			/* Also, force SERR#/PERR# in PCI command. */
			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
		}
	}

	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tg3_flag_set(tp, PCI_HIGH_SPEED);
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tg3_flag_set(tp, PCI_32BIT);

	/* Chip-specific fixup from Broadcom driver */
	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
	}
	/* Default fast path register access methods */
	tp->read32 = tg3_read32;
	tp->write32 = tg3_write32;
	tp->read32_mbox = tg3_read32;
	tp->write32_mbox = tg3_write32;
	tp->write32_tx_mbox = tg3_write32;
	tp->write32_rx_mbox = tg3_write32;

	/* Various workaround register access methods */
	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
		tp->write32 = tg3_write_indirect_reg32;
	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
		 (tg3_flag(tp, PCI_EXPRESS) &&
		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
		/*
		 * Back to back register writes can cause problems on these
		 * chips, the workaround is to read back all reg writes
		 * except those to mailbox regs.
		 *
		 * See tg3_write_indirect_reg32().
		 */
		tp->write32 = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		if (tg3_flag(tp, MBOX_WRITE_REORDER))
			tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, ICH_WORKAROUND)) {
		tp->read32 = tg3_read_indirect_reg32;
		tp->write32 = tg3_write_indirect_reg32;
		tp->read32_mbox = tg3_read_indirect_mbox;
		tp->write32_mbox = tg3_write_indirect_mbox;
		tp->write32_tx_mbox = tg3_write_indirect_mbox;
		tp->write32_rx_mbox = tg3_write_indirect_mbox;

		iounmap(tp->regs);
		tp->regs = NULL;

		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd &= ~PCI_COMMAND_MEMORY;
		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	}
	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tp->read32_mbox = tg3_read32_mbox_5906;
		tp->write32_mbox = tg3_write32_mbox_5906;
		tp->write32_tx_mbox = tg3_write32_mbox_5906;
		tp->write32_rx_mbox = tg3_write32_mbox_5906;
	}
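	/* Nearly all of the accessor overrides happen above:
	 * tg3_write_flush_reg32 where posted writes must be flushed,
	 * the indirect config-space accessors for the PCIX target bug
	 * and the ICH workaround, and the 5906 mailbox variants. The
	 * SSB FLUSH_POSTED_WRITES case further below adjusts the
	 * mailbox writers once more.
	 */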
	if (tp->write32 == tg3_write_indirect_reg32 ||
	    (tg3_flag(tp, PCIX_MODE) &&
	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	      tg3_asic_rev(tp) == ASIC_REV_5701)))
		tg3_flag_set(tp, SRAM_USE_CONFIG);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed. Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tg3_flag(tp, PCIX_MODE)) {
			pci_read_config_dword(tp->pdev,
					      tp->pcix_cap + PCI_X_STATUS,
					      &val);
			tp->pci_fn = val & 0x7;
		}
	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
		   tg3_asic_rev(tp) == ASIC_REV_5720) {
		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
			val = tr32(TG3_CPMU_STATUS);

		if (tg3_asic_rev(tp) == ASIC_REV_5717)
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
		else
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
				     TG3_CPMU_STATUS_FSHFT_5719;
	}

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		tp->write32_tx_mbox = tg3_write_flush_reg32;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
	}
	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLAG_IS_NIC flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
	 */
	tg3_get_eeprom_hw_cfg(tp);

	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
				 PCISTATE_ALLOW_APE_SHMEM_WR |
				 PCISTATE_ALLOW_APE_PSPACE_WR;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
				       pci_state_reg);

		tg3_ape_lock_init(tp);
	}

	/* Set up tp->grc_local_ctrl before calling
	 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
	 * will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_flag(tp, EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_CLASS))
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* Turn off the debug UART. */
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
		if (tg3_flag(tp, IS_NIC))
			/* Keep VMain power. */
			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					      GRC_LCLCTRL_GPIO_OUTPUT0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5762)
		tp->grc_local_ctrl |=
			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;

	/* Switch out of Vaux if it is a NIC */
	tg3_pwrsrc_switch_to_vmain(tp);
	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, JUMBO_RING_ENABLE);

	/* Determine WakeOnLan speed to use. */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
		tg3_flag_clear(tp, WOL_SPEED_100MB);
	} else {
		tg3_flag_set(tp, WOL_SPEED_100MB);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906)
		tp->phy_flags |= TG3_PHYFLG_IS_FET;

	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;

	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;

	if (tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
		    tg3_asic_rev(tp) == ASIC_REV_5761) {
			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
		} else
			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		tp->phy_otp = tg3_read_otp_phycfg(tp);
		if (tp->phy_otp == 0)
			tp->phy_otp = TG3_OTP_DEFAULT;
	}

	if (tg3_flag(tp, CPMU_PRESENT))
		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
	else
		tp->mi_mode = MAC_MI_MODE_BASE;

	tp->coalesce_mode = 0;
	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
	/* Set these bits to enable statistics workaround. */
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		tg3_flag_set(tp, USE_PHYLIB);

	err = tg3_mdio_init(tp);
	if (err)
		return err;

	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
			GRC_MODE_WORD_SWAP_B2HRX_DATA |
			GRC_MODE_B2HRX_ENABLE |
			GRC_MODE_HTX2B_ENABLE |
			GRC_MODE_HOST_STACKUP);
	else
		val &= GRC_MODE_HOST_STACKUP;

	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);

	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
	tw32(TG3PCI_REG_BASE_ADDR, 0);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
			void __iomem *sram_base;

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly. If the return
			 * value is bad, force enable the PCIX workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE +
				    NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
		}
	}

	udelay(50);
	tg3_nvram_init(tp);

	/* If the device has an NVRAM, no need to load patch firmware */
	if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
	    !tg3_flag(tp, NO_NVRAM))
		tp->fw_needed = NULL;

	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tg3_flag_set(tp, IS_5788);

	if (!tg3_flag(tp, IS_5788) &&
	    tg3_asic_rev(tp) != ASIC_REV_5700)
		tg3_flag_set(tp, TAGGED_STATUS);
	if (tg3_flag(tp, TAGGED_STATUS)) {
		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
				      HOSTCC_MODE_CLRTICK_TXBD);

		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
				       tp->misc_host_ctrl);
	}

	/* Preserve the APE MAC_MODE bits */
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	else
		tp->mac_mode = 0;

	if (tg3_10_100_only_device(tp, ent))
		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;

	err = tg3_phy_probe(tp);
	if (err) {
		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
		/* ... but do not return immediately ... */
		tg3_mdio_fini(tp);
	}

	tg3_read_vpd(tp);
	tg3_read_fw_ver(tp);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		else
			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	}

	/* 5700 {AX,BX} chips have a broken status block link
	 * change bit implementation, so we must use the
	 * status register in those cases.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5700)
		tg3_flag_set(tp, USE_LINKCHG_REG);
	else
		tg3_flag_clear(tp, USE_LINKCHG_REG);

	/* The led_ctrl is set during tg3_phy_probe, here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		tg3_flag_set(tp, USE_LINKCHG_REG);
	}

	/* For all SERDES we poll the MAC status register. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		tg3_flag_set(tp, POLL_SERDES);
	else
		tg3_flag_clear(tp, POLL_SERDES);

	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
	    tg3_flag(tp, PCIX_MODE)) {
		tp->rx_offset = NET_SKB_PAD;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		tp->rx_copy_thresh = ~(u16)0;
#endif
	}

	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;

	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;

	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to work around hw errata.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5755)
		tp->rx_std_max_post = 8;

	if (tg3_flag(tp, ASPM_WORKAROUND))
		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
				     PCIE_PWR_MGMT_L1_THRESH_MSK;

	return err;
}
#ifdef CONFIG_SPARC
static int tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const unsigned char *addr;
	int len;

	addr = of_get_property(dp, "local-mac-address", &len);
	if (addr && len == ETH_ALEN) {
		memcpy(dev->dev_addr, addr, ETH_ALEN);
		return 0;
	}
	return -ENODEV;
}

static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;

	memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
	return 0;
}
#endif
static int tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;
	int err;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	if (tg3_flag(tp, IS_SSB_CORE)) {
		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
			return 0;
	}

	mac_offset = 0x7c;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	return 0;
}
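/* Worked example for the mailbox path above: the 0x484b signature in
 * the high word is ASCII "HK". With hi = 0x484b0a1b and
 * lo = 0x2c3d4e5f, the decoded MAC address is 0a:1b:2c:3d:4e:5f.
 */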
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects. We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
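/* Example: PCI_CACHE_LINE_SIZE is in 32-bit words, so byte = 0x10
 * means a 64-byte cache line (0x10 * 4). With goal ==
 * BOUNDARY_SINGLE_CACHELINE on a plain PCI bus, the 64-byte case
 * above selects DMA_RWCTRL_READ_BNDRY_64 | DMA_RWCTRL_WRITE_BNDRY_64.
 */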
static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
			   int size, bool to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
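/* The completion poll above retries 40 times with a 100us delay, so a
 * descriptor that never completes bounds the wait at roughly 4ms
 * before -ENODEV is returned.
 */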
#define TEST_BUFFER_SIZE	0x2000

static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};
static int tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
		    tg3_asic_rev(tp) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
		    tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    tg3_asic_rev(tp) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (tg3_asic_rev(tp) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}
	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on. This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning. In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
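/* Note the asymmetry in the verify loop above: a corrupted read-back
 * first retries with the conservative 16-byte write boundary, and the
 * probe only fails with -ENODEV if corruption persists even then.
 */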
static void tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tg3_flag(tp, 57765_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
static char *tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400:	return "5400";
	case TG3_PHY_ID_BCM5401:	return "5401";
	case TG3_PHY_ID_BCM5411:	return "5411";
	case TG3_PHY_ID_BCM5701:	return "5701";
	case TG3_PHY_ID_BCM5703:	return "5703";
	case TG3_PHY_ID_BCM5704:	return "5704";
	case TG3_PHY_ID_BCM5705:	return "5705";
	case TG3_PHY_ID_BCM5750:	return "5750";
	case TG3_PHY_ID_BCM5752:	return "5752";
	case TG3_PHY_ID_BCM5714:	return "5714";
	case TG3_PHY_ID_BCM5780:	return "5780";
	case TG3_PHY_ID_BCM5755:	return "5755";
	case TG3_PHY_ID_BCM5787:	return "5787";
	case TG3_PHY_ID_BCM5784:	return "5784";
	case TG3_PHY_ID_BCM5756:	return "5722/5756";
	case TG3_PHY_ID_BCM5906:	return "5906";
	case TG3_PHY_ID_BCM5761:	return "5761";
	case TG3_PHY_ID_BCM5718C:	return "5718C";
	case TG3_PHY_ID_BCM5718S:	return "5718S";
	case TG3_PHY_ID_BCM57765:	return "57765";
	case TG3_PHY_ID_BCM5719C:	return "5719C";
	case TG3_PHY_ID_BCM5720C:	return "5720C";
	case TG3_PHY_ID_BCM5762:	return "5762C";
	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
	case 0:			return "serdes";
	default:		return "unknown";
	}
}
static char *tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}
static void tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}
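/* On 5705 and later parts the irq-time coalescing parameters are
 * zeroed above; those knobs are presumably not implemented by that
 * hardware generation, so ethtool reports them as zero.
 */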
17393 static int tg3_init_one(struct pci_dev
*pdev
,
17394 const struct pci_device_id
*ent
)
17396 struct net_device
*dev
;
17399 u32 sndmbx
, rcvmbx
, intmbx
;
17401 u64 dma_mask
, persist_dma_mask
;
17402 netdev_features_t features
= 0;
17404 printk_once(KERN_INFO
"%s\n", version
);
17406 err
= pci_enable_device(pdev
);
17408 dev_err(&pdev
->dev
, "Cannot enable PCI device, aborting\n");
17412 err
= pci_request_regions(pdev
, DRV_MODULE_NAME
);
17414 dev_err(&pdev
->dev
, "Cannot obtain PCI resources, aborting\n");
17415 goto err_out_disable_pdev
;
17418 pci_set_master(pdev
);
17420 dev
= alloc_etherdev_mq(sizeof(*tp
), TG3_IRQ_MAX_VECS
);
17423 goto err_out_free_res
;
17426 SET_NETDEV_DEV(dev
, &pdev
->dev
);
17428 tp
= netdev_priv(dev
);
17431 tp
->rx_mode
= TG3_DEF_RX_MODE
;
17432 tp
->tx_mode
= TG3_DEF_TX_MODE
;
17436 tp
->msg_enable
= tg3_debug
;
17438 tp
->msg_enable
= TG3_DEF_MSG_ENABLE
;
17440 if (pdev_is_ssb_gige_core(pdev
)) {
17441 tg3_flag_set(tp
, IS_SSB_CORE
);
17442 if (ssb_gige_must_flush_posted_writes(pdev
))
17443 tg3_flag_set(tp
, FLUSH_POSTED_WRITES
);
17444 if (ssb_gige_one_dma_at_once(pdev
))
17445 tg3_flag_set(tp
, ONE_DMA_AT_ONCE
);
17446 if (ssb_gige_have_roboswitch(pdev
)) {
17447 tg3_flag_set(tp
, USE_PHYLIB
);
17448 tg3_flag_set(tp
, ROBOSWITCH
);
17450 if (ssb_gige_is_rgmii(pdev
))
17451 tg3_flag_set(tp
, RGMII_MODE
);
17454 /* The word/byte swap controls here control register access byte
17455 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17458 tp
->misc_host_ctrl
=
17459 MISC_HOST_CTRL_MASK_PCI_INT
|
17460 MISC_HOST_CTRL_WORD_SWAP
|
17461 MISC_HOST_CTRL_INDIR_ACCESS
|
17462 MISC_HOST_CTRL_PCISTATE_RW
;
17464 /* The NONFRM (non-frame) byte/word swap controls take effect
17465 * on descriptor entries, anything which isn't packet data.
17467 * The StrongARM chips on the board (one for tx, one for rx)
17468 * are running in big-endian mode.
17470 tp
->grc_mode
= (GRC_MODE_WSWAP_DATA
| GRC_MODE_BSWAP_DATA
|
17471 GRC_MODE_WSWAP_NONFRM_DATA
);
17472 #ifdef __BIG_ENDIAN
17473 tp
->grc_mode
|= GRC_MODE_BSWAP_NONFRM_DATA
;
17475 spin_lock_init(&tp
->lock
);
17476 spin_lock_init(&tp
->indirect_lock
);
17477 INIT_WORK(&tp
->reset_task
, tg3_reset_task
);
17479 tp
->regs
= pci_ioremap_bar(pdev
, BAR_0
);
17481 dev_err(&pdev
->dev
, "Cannot map device registers, aborting\n");
17483 goto err_out_free_dev
;
17486 if (tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5761
||
17487 tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5761E
||
17488 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5761S
||
17489 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5761SE
||
17490 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5717
||
17491 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5717_C
||
17492 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5718
||
17493 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5719
||
17494 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5720
||
17495 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57767
||
17496 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57764
||
17497 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5762
||
17498 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5725
||
17499 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5727
||
17500 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57787
) {
17501 tg3_flag_set(tp
, ENABLE_APE
);
17502 tp
->aperegs
= pci_ioremap_bar(pdev
, BAR_2
);
17503 if (!tp
->aperegs
) {
17504 dev_err(&pdev
->dev
,
17505 "Cannot map APE registers, aborting\n");
17507 goto err_out_iounmap
;
17511 tp
->rx_pending
= TG3_DEF_RX_RING_PENDING
;
17512 tp
->rx_jumbo_pending
= TG3_DEF_RX_JUMBO_RING_PENDING
;
17514 dev
->ethtool_ops
= &tg3_ethtool_ops
;
17515 dev
->watchdog_timeo
= TG3_TX_TIMEOUT
;
17516 dev
->netdev_ops
= &tg3_netdev_ops
;
17517 dev
->irq
= pdev
->irq
;
17519 err
= tg3_get_invariants(tp
, ent
);
17521 dev_err(&pdev
->dev
,
17522 "Problem fetching invariants of chip, aborting\n");
17523 goto err_out_apeunmap
;
17526 /* The EPB bridge inside 5714, 5715, and 5780 and any
17527 * device behind the EPB cannot support DMA addresses > 40-bit.
17528 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17529 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17530 * do DMA address check in tg3_start_xmit().
17532 if (tg3_flag(tp
, IS_5788
))
17533 persist_dma_mask
= dma_mask
= DMA_BIT_MASK(32);
17534 else if (tg3_flag(tp
, 40BIT_DMA_BUG
)) {
17535 persist_dma_mask
= dma_mask
= DMA_BIT_MASK(40);
17536 #ifdef CONFIG_HIGHMEM
17537 dma_mask
= DMA_BIT_MASK(64);
17540 persist_dma_mask
= dma_mask
= DMA_BIT_MASK(64);
17542 /* Configure DMA attributes. */
17543 if (dma_mask
> DMA_BIT_MASK(32)) {
17544 err
= pci_set_dma_mask(pdev
, dma_mask
);
17546 features
|= NETIF_F_HIGHDMA
;
17547 err
= pci_set_consistent_dma_mask(pdev
,
17550 dev_err(&pdev
->dev
, "Unable to obtain 64 bit "
17551 "DMA for consistent allocations\n");
17552 goto err_out_apeunmap
;
17556 if (err
|| dma_mask
== DMA_BIT_MASK(32)) {
17557 err
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(32));
17559 dev_err(&pdev
->dev
,
17560 "No usable DMA configuration, aborting\n");
17561 goto err_out_apeunmap
;
17565 tg3_init_bufmgr_config(tp
);
17567 features
|= NETIF_F_HW_VLAN_CTAG_TX
| NETIF_F_HW_VLAN_CTAG_RX
;
17569 /* 5700 B0 chips do not support checksumming correctly due
17570 * to hardware bugs.
17572 if (tg3_chip_rev_id(tp
) != CHIPREV_ID_5700_B0
) {
17573 features
|= NETIF_F_SG
| NETIF_F_IP_CSUM
| NETIF_F_RXCSUM
;
17575 if (tg3_flag(tp
, 5755_PLUS
))
17576 features
|= NETIF_F_IPV6_CSUM
;

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
		    tg3_asic_rev(tp) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}
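
	/* As noted above, firmware TSO on older chips stays off by default
	 * but can be toggled from userspace once exposed via hw_features,
	 * e.g. (illustrative; the interface name is a placeholder):
	 *
	 *   ethtool -K eth0 tso on
	 */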

	dev->features |= features;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
	 * loopback for the remaining devices.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;
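
	/* NETIF_F_LOOPBACK lands only in hw_features, not features, so MAC
	 * loopback is user-selectable but disabled by default; it is
	 * presumably acted on elsewhere in this driver when userspace flips
	 * the feature bit.
	 */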

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	/*
	 * Reset the chip in case a UNDI or EFI driver did not shut it down
	 * cleanly: the DMA self test below will enable the WDMAC, and we
	 * would otherwise see (spurious) pending DMA on the PCI bus at that
	 * point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we set
		 * up above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	tg3_init_coal(tp);
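
	/* tg3_init_coal() only seeds the default interrupt-coalescing
	 * parameters in the tg3 private struct; they are written to the
	 * hardware later, when the device is brought up, and remain
	 * adjustable from userspace via "ethtool -C".
	 */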

	pci_set_drvdata(pdev, dev);

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, PTP_CAPABLE);
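
	/* PTP_CAPABLE marks the ASICs whose hardware clock can back a PTP
	 * (IEEE 1588) clock; registration of the ptp_clock device itself
	 * happens elsewhere in the driver, gated on this flag.
	 */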

	tg3_timer_init(tp);

	tg3_carrier_off(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tg3_chip_rev_id(tp),
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	return err;
}
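
/* The error labels above unwind in reverse order of what the probe path
 * acquired: APE registers, then MAC registers, then the netdev, then the
 * PCI regions and the device enable. Each goto therefore releases exactly
 * what had been set up by the time the failure occurred.
 */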

static void tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
	}
}
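
/* System sleep support: tg3_suspend() quiesces the NIC (reset task, timer,
 * interrupts) and prepares the power-down, tg3_resume() reprograms the
 * hardware from scratch; both are wired into the PCI core through the
 * tg3_pm_ops declared below.
 */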
#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, true);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

unlock:
	rtnl_unlock();
	return err;
}
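
/* If tg3_power_down_prepare() fails, tg3_suspend() still returns the error
 * but first restores the device to a running state, so the interface keeps
 * working when the system aborts the suspend.
 */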

static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp,
			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

unlock:
	rtnl_unlock();
	return err;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
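
/* SIMPLE_DEV_PM_OPS expands to a struct dev_pm_ops that routes the
 * system-sleep callbacks (suspend, freeze, poweroff and their resume
 * counterparts) to the two handlers above; when CONFIG_PM_SLEEP is not set,
 * the macro leaves the ops empty, matching the #ifdef around the handlers.
 */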

static void tg3_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (netif_running(dev))
		dev_close(dev);

	if (system_state == SYSTEM_POWER_OFF)
		tg3_power_down(tp);

	rtnl_unlock();
}
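
/* PCI error recovery: the PCI error-recovery core calls .error_detected
 * first, then (if a reset is requested) .slot_reset after the link has been
 * reset, and finally .resume once traffic may flow again. The three
 * callbacks below implement that sequence for this driver.
 */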

/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	/* We probably don't have netdev yet */
	if (!netdev || !netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure) {
		if (netdev) {
			tg3_napi_enable(tp);
			dev_close(netdev);
		}
		err = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_disable_device(pdev);
	}

	rtnl_unlock();

	return err;
}

/**
 * tg3_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by the BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netdev || !netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
		tg3_napi_enable(tp);
		dev_close(netdev);
	}
	rtnl_unlock();

	return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	tg3_phy_start(tp);

done:
	rtnl_unlock();
}

static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= &tg3_pm_ops,
	.shutdown	= tg3_shutdown,
};

module_pci_driver(tg3_driver);
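
/* module_pci_driver() generates the module_init()/module_exit() boilerplate,
 * calling pci_register_driver(&tg3_driver) on load and
 * pci_unregister_driver() on unload; probing of individual NICs then happens
 * through tg3_init_one() for each ID matched by tg3_pci_tbl.
 */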