/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2013 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
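
/* Example: callers test and toggle device features by token name; the
 * ## paste turns the bare token into its enum value at compile time.
 * A minimal illustration (flag names taken from uses later in this file):
 *
 *	if (tg3_flag(tp, JUMBO_CAPABLE))	// tests TG3_FLAG_JUMBO_CAPABLE
 *		tg3_flag_set(tp, ENABLE_APE);	// sets TG3_FLAG_ENABLE_APE
 */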

#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		131
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"April 09, 2013"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
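
/* NEXT_TX() is the '& (foo - 1)' trick from the comment above: because
 * TG3_TX_RING_SIZE is a power of two (512), masking with (512 - 1)
 * gives the same result as the modulo but compiles to a single AND:
 *
 *	(511 + 1) & 511 == 0	// identical to (511 + 1) % 512
 */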

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode. The driver
 * works around this bug by double copying the packet. This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient. For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path. Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif
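
/* Sketch of how the threshold is consulted on the receive path. The
 * actual consumer, tg3_rx(), lies outside this excerpt, so treat this
 * as an illustration of the pattern rather than the verbatim code:
 *
 *	if (len > TG3_RX_COPY_THRESH(tp)) {
 *		// large frame: hand the DMA buffer itself up the stack
 *	} else {
 *		// small frame: memcpy into a fresh skb, recycle the buffer
 *	}
 */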

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test (online) " },
	[TG3_LINK_TEST]		= { "link test (online) " },
	[TG3_REGISTER_TEST]	= { "register test (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
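
/* Example: tw32_f() posts a write and flushes it with a read-back, while
 * tw32_wait_f() additionally enforces a settle time both before and after
 * the flush. tg3_switch_clocks() below uses the latter, e.g.:
 *
 *	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);	// write + 40 usec wait
 */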

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver isn't holding any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
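
/* Typical acquire/use/release pattern, as exercised by
 * tg3_ape_event_lock() just below:
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return -EBUSY;		// grant never matched our bit
 *	...touch APE shared memory...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 */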

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
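
/* The per-chunk handshake above, in order:
 *   1. post SCRTCHPD_READ + EVENT_PENDING in TG3_APE_EVENT_STATUS;
 *   2. write the chunk's base offset and length into the message buffer;
 *   3. drop the MEM lock and ring APE_EVENT_1;
 *   4. poll until EVENT_PENDING clears, then copy the reply out of the
 *      message area at msgoff one dword at a time.
 */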

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
				APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state. Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}
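
/* Usage note: callers pass the AUXCTL shadow selector as 'reg' and the
 * payload as 'set'; the two are OR'd into one MII write, and for the
 * MISC shadow the write-enable bit is added automatically, e.g.:
 *
 *	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, val);
 */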

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers. A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
			     usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
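
/* Worked example of the wait computation above: with a full
 * TG3_FW_EVENT_TIMEOUT_USEC of 2500 usec remaining,
 * delay_cnt = (2500 >> 3) + 1 = 313 polls of 8 usec each,
 * i.e. the 2.5 ms budget covered in 8 usec steps.
 */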

/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}

/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware. Some Sun onboard
	 * parts are configured like that. So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}

	tp->link_up = netif_carrier_ok(tp->dev);
}

static u32 tg3_decode_flowctrl_1000T(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_PAUSE_CAP) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_PAUSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_PAUSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u32 tg3_decode_flowctrl_1000X(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_1000XPAUSE) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_1000XPSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_1000XPSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
1936
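/* Illustrative sketch, not part of the upstream driver: a worked
 * example of the IEEE 802.3 Annex 28B pause resolution implemented
 * above. A local end advertising symmetric pause only, against a
 * link partner advertising symmetric plus asymmetric pause, resolves
 * to flow control in both directions.
 */
static inline u8 tg3_example_resolve_pause(void)
{
        u16 lcladv = ADVERTISE_1000XPAUSE;
        u16 rmtadv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;

        /* Both ends set ADVERTISE_1000XPAUSE, so this returns
         * FLOW_CTRL_TX | FLOW_CTRL_RX.
         */
        return tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
}
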
1937 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1938 {
1939 u8 autoneg;
1940 u8 flowctrl = 0;
1941 u32 old_rx_mode = tp->rx_mode;
1942 u32 old_tx_mode = tp->tx_mode;
1943
1944 if (tg3_flag(tp, USE_PHYLIB))
1945 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1946 else
1947 autoneg = tp->link_config.autoneg;
1948
1949 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1950 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1951 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1952 else
1953 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1954 } else
1955 flowctrl = tp->link_config.flowctrl;
1956
1957 tp->link_config.active_flowctrl = flowctrl;
1958
1959 if (flowctrl & FLOW_CTRL_RX)
1960 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1961 else
1962 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1963
1964 if (old_rx_mode != tp->rx_mode)
1965 tw32_f(MAC_RX_MODE, tp->rx_mode);
1966
1967 if (flowctrl & FLOW_CTRL_TX)
1968 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1969 else
1970 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1971
1972 if (old_tx_mode != tp->tx_mode)
1973 tw32_f(MAC_TX_MODE, tp->tx_mode);
1974 }
1975
1976 static void tg3_adjust_link(struct net_device *dev)
1977 {
1978 u8 oldflowctrl, linkmesg = 0;
1979 u32 mac_mode, lcl_adv, rmt_adv;
1980 struct tg3 *tp = netdev_priv(dev);
1981 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1982
1983 spin_lock_bh(&tp->lock);
1984
1985 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1986 MAC_MODE_HALF_DUPLEX);
1987
1988 oldflowctrl = tp->link_config.active_flowctrl;
1989
1990 if (phydev->link) {
1991 lcl_adv = 0;
1992 rmt_adv = 0;
1993
1994 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1995 mac_mode |= MAC_MODE_PORT_MODE_MII;
1996 else if (phydev->speed == SPEED_1000 ||
1997 tg3_asic_rev(tp) != ASIC_REV_5785)
1998 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1999 else
2000 mac_mode |= MAC_MODE_PORT_MODE_MII;
2001
2002 if (phydev->duplex == DUPLEX_HALF)
2003 mac_mode |= MAC_MODE_HALF_DUPLEX;
2004 else {
2005 lcl_adv = mii_advertise_flowctrl(
2006 tp->link_config.flowctrl);
2007
2008 if (phydev->pause)
2009 rmt_adv = LPA_PAUSE_CAP;
2010 if (phydev->asym_pause)
2011 rmt_adv |= LPA_PAUSE_ASYM;
2012 }
2013
2014 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2015 } else
2016 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2017
2018 if (mac_mode != tp->mac_mode) {
2019 tp->mac_mode = mac_mode;
2020 tw32_f(MAC_MODE, tp->mac_mode);
2021 udelay(40);
2022 }
2023
2024 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2025 if (phydev->speed == SPEED_10)
2026 tw32(MAC_MI_STAT,
2027 MAC_MI_STAT_10MBPS_MODE |
2028 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2029 else
2030 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2031 }
2032
2033 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2034 tw32(MAC_TX_LENGTHS,
2035 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2036 (6 << TX_LENGTHS_IPG_SHIFT) |
2037 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2038 else
2039 tw32(MAC_TX_LENGTHS,
2040 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2041 (6 << TX_LENGTHS_IPG_SHIFT) |
2042 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2043
2044 if (phydev->link != tp->old_link ||
2045 phydev->speed != tp->link_config.active_speed ||
2046 phydev->duplex != tp->link_config.active_duplex ||
2047 oldflowctrl != tp->link_config.active_flowctrl)
2048 linkmesg = 1;
2049
2050 tp->old_link = phydev->link;
2051 tp->link_config.active_speed = phydev->speed;
2052 tp->link_config.active_duplex = phydev->duplex;
2053
2054 spin_unlock_bh(&tp->lock);
2055
2056 if (linkmesg)
2057 tg3_link_report(tp);
2058 }
2059
2060 static int tg3_phy_init(struct tg3 *tp)
2061 {
2062 struct phy_device *phydev;
2063
2064 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2065 return 0;
2066
2067 /* Bring the PHY back to a known state. */
2068 tg3_bmcr_reset(tp);
2069
2070 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2071
2072 /* Attach the MAC to the PHY. */
2073 phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2074 tg3_adjust_link, phydev->interface);
2075 if (IS_ERR(phydev)) {
2076 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2077 return PTR_ERR(phydev);
2078 }
2079
2080 /* Mask with MAC supported features. */
2081 switch (phydev->interface) {
2082 case PHY_INTERFACE_MODE_GMII:
2083 case PHY_INTERFACE_MODE_RGMII:
2084 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2085 phydev->supported &= (PHY_GBIT_FEATURES |
2086 SUPPORTED_Pause |
2087 SUPPORTED_Asym_Pause);
2088 break;
2089 }
2090 /* fallthru */
2091 case PHY_INTERFACE_MODE_MII:
2092 phydev->supported &= (PHY_BASIC_FEATURES |
2093 SUPPORTED_Pause |
2094 SUPPORTED_Asym_Pause);
2095 break;
2096 default:
2097 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2098 return -EINVAL;
2099 }
2100
2101 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2102
2103 phydev->advertising = phydev->supported;
2104
2105 return 0;
2106 }
2107
2108 static void tg3_phy_start(struct tg3 *tp)
2109 {
2110 struct phy_device *phydev;
2111
2112 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2113 return;
2114
2115 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2116
2117 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2118 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2119 phydev->speed = tp->link_config.speed;
2120 phydev->duplex = tp->link_config.duplex;
2121 phydev->autoneg = tp->link_config.autoneg;
2122 phydev->advertising = tp->link_config.advertising;
2123 }
2124
2125 phy_start(phydev);
2126
2127 phy_start_aneg(phydev);
2128 }
2129
2130 static void tg3_phy_stop(struct tg3 *tp)
2131 {
2132 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2133 return;
2134
2135 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2136 }
2137
2138 static void tg3_phy_fini(struct tg3 *tp)
2139 {
2140 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2141 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2142 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2143 }
2144 }
2145
2146 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2147 {
2148 int err;
2149 u32 val;
2150
2151 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2152 return 0;
2153
2154 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2155 /* Cannot do read-modify-write on 5401 */
2156 err = tg3_phy_auxctl_write(tp,
2157 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2158 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2159 0x4c20);
2160 goto done;
2161 }
2162
2163 err = tg3_phy_auxctl_read(tp,
2164 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2165 if (err)
2166 return err;
2167
2168 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2169 err = tg3_phy_auxctl_write(tp,
2170 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2171
2172 done:
2173 return err;
2174 }
2175
2176 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2177 {
2178 u32 phytest;
2179
2180 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2181 u32 phy;
2182
2183 tg3_writephy(tp, MII_TG3_FET_TEST,
2184 phytest | MII_TG3_FET_SHADOW_EN);
2185 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2186 if (enable)
2187 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2188 else
2189 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2190 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2191 }
2192 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2193 }
2194 }
2195
2196 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2197 {
2198 u32 reg;
2199
2200 if (!tg3_flag(tp, 5705_PLUS) ||
2201 (tg3_flag(tp, 5717_PLUS) &&
2202 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2203 return;
2204
2205 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2206 tg3_phy_fet_toggle_apd(tp, enable);
2207 return;
2208 }
2209
2210 reg = MII_TG3_MISC_SHDW_WREN |
2211 MII_TG3_MISC_SHDW_SCR5_SEL |
2212 MII_TG3_MISC_SHDW_SCR5_LPED |
2213 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2214 MII_TG3_MISC_SHDW_SCR5_SDTL |
2215 MII_TG3_MISC_SHDW_SCR5_C125OE;
2216 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2217 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2218
2219 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2220
2221
2222 reg = MII_TG3_MISC_SHDW_WREN |
2223 MII_TG3_MISC_SHDW_APD_SEL |
2224 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2225 if (enable)
2226 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2227
2228 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2229 }
2230
2231 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2232 {
2233 u32 phy;
2234
2235 if (!tg3_flag(tp, 5705_PLUS) ||
2236 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2237 return;
2238
2239 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2240 u32 ephy;
2241
2242 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2243 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2244
2245 tg3_writephy(tp, MII_TG3_FET_TEST,
2246 ephy | MII_TG3_FET_SHADOW_EN);
2247 if (!tg3_readphy(tp, reg, &phy)) {
2248 if (enable)
2249 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2250 else
2251 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2252 tg3_writephy(tp, reg, phy);
2253 }
2254 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2255 }
2256 } else {
2257 int ret;
2258
2259 ret = tg3_phy_auxctl_read(tp,
2260 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2261 if (!ret) {
2262 if (enable)
2263 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2264 else
2265 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2266 tg3_phy_auxctl_write(tp,
2267 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2268 }
2269 }
2270 }
2271
2272 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2273 {
2274 int ret;
2275 u32 val;
2276
2277 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2278 return;
2279
2280 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2281 if (!ret)
2282 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2283 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2284 }
2285
2286 static void tg3_phy_apply_otp(struct tg3 *tp)
2287 {
2288 u32 otp, phy;
2289
2290 if (!tp->phy_otp)
2291 return;
2292
2293 otp = tp->phy_otp;
2294
2295 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2296 return;
2297
2298 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2299 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2300 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2301
2302 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2303 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2304 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2305
2306 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2307 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2308 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2309
2310 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2311 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2312
2313 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2314 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2315
2316 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2317 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2318 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2319
2320 tg3_phy_toggle_auxctl_smdsp(tp, false);
2321 }
2322
2323 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2324 {
2325 u32 val;
2326
2327 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2328 return;
2329
2330 tp->setlpicnt = 0;
2331
2332 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2333 current_link_up &&
2334 tp->link_config.active_duplex == DUPLEX_FULL &&
2335 (tp->link_config.active_speed == SPEED_100 ||
2336 tp->link_config.active_speed == SPEED_1000)) {
2337 u32 eeectl;
2338
2339 if (tp->link_config.active_speed == SPEED_1000)
2340 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2341 else
2342 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2343
2344 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2345
2346 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2347 TG3_CL45_D7_EEERES_STAT, &val);
2348
2349 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2350 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2351 tp->setlpicnt = 2;
2352 }
2353
2354 if (!tp->setlpicnt) {
2355 if (current_link_up &&
2356 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2357 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2358 tg3_phy_toggle_auxctl_smdsp(tp, false);
2359 }
2360
2361 val = tr32(TG3_CPMU_EEE_MODE);
2362 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2363 }
2364 }
2365
2366 static void tg3_phy_eee_enable(struct tg3 *tp)
2367 {
2368 u32 val;
2369
2370 if (tp->link_config.active_speed == SPEED_1000 &&
2371 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2372 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2373 tg3_flag(tp, 57765_CLASS)) &&
2374 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2375 val = MII_TG3_DSP_TAP26_ALNOKO |
2376 MII_TG3_DSP_TAP26_RMRXSTO;
2377 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2378 tg3_phy_toggle_auxctl_smdsp(tp, false);
2379 }
2380
2381 val = tr32(TG3_CPMU_EEE_MODE);
2382 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2383 }
2384
2385 static int tg3_wait_macro_done(struct tg3 *tp)
2386 {
2387 int limit = 100;
2388
2389 while (limit--) {
2390 u32 tmp32;
2391
2392 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2393 if ((tmp32 & 0x1000) == 0)
2394 break;
2395 }
2396 }
2397 if (limit < 0)
2398 return -EBUSY;
2399
2400 return 0;
2401 }
2402
2403 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2404 {
2405 static const u32 test_pat[4][6] = {
2406 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2407 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2408 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2409 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2410 };
2411 int chan;
2412
2413 for (chan = 0; chan < 4; chan++) {
2414 int i;
2415
2416 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2417 (chan * 0x2000) | 0x0200);
2418 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2419
2420 for (i = 0; i < 6; i++)
2421 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2422 test_pat[chan][i]);
2423
2424 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2425 if (tg3_wait_macro_done(tp)) {
2426 *resetp = 1;
2427 return -EBUSY;
2428 }
2429
2430 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2431 (chan * 0x2000) | 0x0200);
2432 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2433 if (tg3_wait_macro_done(tp)) {
2434 *resetp = 1;
2435 return -EBUSY;
2436 }
2437
2438 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2439 if (tg3_wait_macro_done(tp)) {
2440 *resetp = 1;
2441 return -EBUSY;
2442 }
2443
2444 for (i = 0; i < 6; i += 2) {
2445 u32 low, high;
2446
2447 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2448 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2449 tg3_wait_macro_done(tp)) {
2450 *resetp = 1;
2451 return -EBUSY;
2452 }
2453 low &= 0x7fff;
2454 high &= 0x000f;
2455 if (low != test_pat[chan][i] ||
2456 high != test_pat[chan][i+1]) {
2457 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2458 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2459 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2460
2461 return -EBUSY;
2462 }
2463 }
2464 }
2465
2466 return 0;
2467 }
2468
2469 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2470 {
2471 int chan;
2472
2473 for (chan = 0; chan < 4; chan++) {
2474 int i;
2475
2476 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2477 (chan * 0x2000) | 0x0200);
2478 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2479 for (i = 0; i < 6; i++)
2480 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2481 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2482 if (tg3_wait_macro_done(tp))
2483 return -EBUSY;
2484 }
2485
2486 return 0;
2487 }
2488
2489 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2490 {
2491 u32 reg32, phy9_orig;
2492 int retries, do_phy_reset, err;
2493
2494 retries = 10;
2495 do_phy_reset = 1;
2496 do {
2497 if (do_phy_reset) {
2498 err = tg3_bmcr_reset(tp);
2499 if (err)
2500 return err;
2501 do_phy_reset = 0;
2502 }
2503
2504 /* Disable transmitter and interrupt. */
2505 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2506 continue;
2507
2508 reg32 |= 0x3000;
2509 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2510
2511 /* Set full-duplex, 1000 Mbps. */
2512 tg3_writephy(tp, MII_BMCR,
2513 BMCR_FULLDPLX | BMCR_SPEED1000);
2514
2515 /* Set to master mode. */
2516 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2517 continue;
2518
2519 tg3_writephy(tp, MII_CTRL1000,
2520 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2521
2522 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2523 if (err)
2524 return err;
2525
2526 /* Block the PHY control access. */
2527 tg3_phydsp_write(tp, 0x8005, 0x0800);
2528
2529 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2530 if (!err)
2531 break;
2532 } while (--retries);
2533
2534 err = tg3_phy_reset_chanpat(tp);
2535 if (err)
2536 return err;
2537
2538 tg3_phydsp_write(tp, 0x8005, 0x0000);
2539
2540 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2541 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2542
2543 tg3_phy_toggle_auxctl_smdsp(tp, false);
2544
2545 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2546
2547 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2548 reg32 &= ~0x3000;
2549 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2550 } else if (!err)
2551 err = -EBUSY;
2552
2553 return err;
2554 }
2555
2556 static void tg3_carrier_off(struct tg3 *tp)
2557 {
2558 netif_carrier_off(tp->dev);
2559 tp->link_up = false;
2560 }
2561
2562 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2563 {
2564 if (tg3_flag(tp, ENABLE_ASF))
2565 netdev_warn(tp->dev,
2566 "Management side-band traffic will be interrupted during phy settings change\n");
2567 }
2568
2569 /* Reset the tigon3 PHY unconditionally and reapply the PHY
2570 * workarounds. Reports a link-down transition first if the
2571 * device is running with a live link. */
2572 static int tg3_phy_reset(struct tg3 *tp)
2573 {
2574 u32 val, cpmuctrl;
2575 int err;
2576
2577 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2578 val = tr32(GRC_MISC_CFG);
2579 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2580 udelay(40);
2581 }
2582 err = tg3_readphy(tp, MII_BMSR, &val);
2583 err |= tg3_readphy(tp, MII_BMSR, &val);
2584 if (err != 0)
2585 return -EBUSY;
2586
2587 if (netif_running(tp->dev) && tp->link_up) {
2588 netif_carrier_off(tp->dev);
2589 tg3_link_report(tp);
2590 }
2591
2592 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2593 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2594 tg3_asic_rev(tp) == ASIC_REV_5705) {
2595 err = tg3_phy_reset_5703_4_5(tp);
2596 if (err)
2597 return err;
2598 goto out;
2599 }
2600
2601 cpmuctrl = 0;
2602 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2603 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2604 cpmuctrl = tr32(TG3_CPMU_CTRL);
2605 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2606 tw32(TG3_CPMU_CTRL,
2607 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2608 }
2609
2610 err = tg3_bmcr_reset(tp);
2611 if (err)
2612 return err;
2613
2614 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2615 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2616 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2617
2618 tw32(TG3_CPMU_CTRL, cpmuctrl);
2619 }
2620
2621 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2622 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2623 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2624 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2625 CPMU_LSPD_1000MB_MACCLK_12_5) {
2626 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2627 udelay(40);
2628 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2629 }
2630 }
2631
2632 if (tg3_flag(tp, 5717_PLUS) &&
2633 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2634 return 0;
2635
2636 tg3_phy_apply_otp(tp);
2637
2638 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2639 tg3_phy_toggle_apd(tp, true);
2640 else
2641 tg3_phy_toggle_apd(tp, false);
2642
2643 out:
2644 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2645 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2646 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2647 tg3_phydsp_write(tp, 0x000a, 0x0323);
2648 tg3_phy_toggle_auxctl_smdsp(tp, false);
2649 }
2650
2651 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2652 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2653 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2654 }
2655
2656 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2657 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2658 tg3_phydsp_write(tp, 0x000a, 0x310b);
2659 tg3_phydsp_write(tp, 0x201f, 0x9506);
2660 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2661 tg3_phy_toggle_auxctl_smdsp(tp, false);
2662 }
2663 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2664 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2665 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2666 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2667 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2668 tg3_writephy(tp, MII_TG3_TEST1,
2669 MII_TG3_TEST1_TRIM_EN | 0x4);
2670 } else
2671 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2672
2673 tg3_phy_toggle_auxctl_smdsp(tp, false);
2674 }
2675 }
2676
2677 /* Set the extended packet length bit (bit 14) on all chips
2678 * that support jumbo frames. */
2679 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2680 /* Cannot do read-modify-write on 5401 */
2681 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2682 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2683 /* Set bit 14 with read-modify-write to preserve other bits */
2684 err = tg3_phy_auxctl_read(tp,
2685 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2686 if (!err)
2687 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2688 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2689 }
2690
2691 /* Set PHY register 0x10 bit 0 to high FIFO elasticity to support
2692 * jumbo frame transmission.
2693 */
2694 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2695 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2696 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2697 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2698 }
2699
2700 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2701 /* adjust output voltage */
2702 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2703 }
2704
2705 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2706 tg3_phydsp_write(tp, 0xffb, 0x4000);
2707
2708 tg3_phy_toggle_automdix(tp, true);
2709 tg3_phy_set_wirespeed(tp);
2710 return 0;
2711 }
2712
2713 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2714 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2715 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2716 TG3_GPIO_MSG_NEED_VAUX)
2717 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2718 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2719 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2720 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2721 (TG3_GPIO_MSG_DRVR_PRES << 12))
2722
2723 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2724 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2725 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2726 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2727 (TG3_GPIO_MSG_NEED_VAUX << 12))
2728
2729 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2730 {
2731 u32 status, shift;
2732
2733 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2734 tg3_asic_rev(tp) == ASIC_REV_5719)
2735 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2736 else
2737 status = tr32(TG3_CPMU_DRV_STATUS);
2738
2739 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2740 status &= ~(TG3_GPIO_MSG_MASK << shift);
2741 status |= (newstat << shift);
2742
2743 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2744 tg3_asic_rev(tp) == ASIC_REV_5719)
2745 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2746 else
2747 tw32(TG3_CPMU_DRV_STATUS, status);
2748
2749 return status >> TG3_APE_GPIO_MSG_SHIFT;
2750 }
2751
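/* Illustrative sketch, not part of the upstream driver: how
 * tg3_set_function_status() above packs per-function state. Each
 * PCI function owns one 4-bit TG3_GPIO_MSG_* field, offset from
 * TG3_APE_GPIO_MSG_SHIFT by four bits per function number.
 */
static inline u32 tg3_example_pack_status(u32 status, u32 pci_fn,
                                          u32 newstat)
{
        u32 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * pci_fn;

        status &= ~(TG3_GPIO_MSG_MASK << shift); /* clear this function's field */
        status |= newstat << shift;              /* install the new state */
        return status;
}
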
2752 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2753 {
2754 if (!tg3_flag(tp, IS_NIC))
2755 return 0;
2756
2757 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2758 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2759 tg3_asic_rev(tp) == ASIC_REV_5720) {
2760 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2761 return -EIO;
2762
2763 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2764
2765 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2766 TG3_GRC_LCLCTL_PWRSW_DELAY);
2767
2768 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2769 } else {
2770 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2771 TG3_GRC_LCLCTL_PWRSW_DELAY);
2772 }
2773
2774 return 0;
2775 }
2776
2777 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2778 {
2779 u32 grc_local_ctrl;
2780
2781 if (!tg3_flag(tp, IS_NIC) ||
2782 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2783 tg3_asic_rev(tp) == ASIC_REV_5701)
2784 return;
2785
2786 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2787
2788 tw32_wait_f(GRC_LOCAL_CTRL,
2789 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2790 TG3_GRC_LCLCTL_PWRSW_DELAY);
2791
2792 tw32_wait_f(GRC_LOCAL_CTRL,
2793 grc_local_ctrl,
2794 TG3_GRC_LCLCTL_PWRSW_DELAY);
2795
2796 tw32_wait_f(GRC_LOCAL_CTRL,
2797 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2798 TG3_GRC_LCLCTL_PWRSW_DELAY);
2799 }
2800
2801 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2802 {
2803 if (!tg3_flag(tp, IS_NIC))
2804 return;
2805
2806 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2807 tg3_asic_rev(tp) == ASIC_REV_5701) {
2808 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2809 (GRC_LCLCTRL_GPIO_OE0 |
2810 GRC_LCLCTRL_GPIO_OE1 |
2811 GRC_LCLCTRL_GPIO_OE2 |
2812 GRC_LCLCTRL_GPIO_OUTPUT0 |
2813 GRC_LCLCTRL_GPIO_OUTPUT1),
2814 TG3_GRC_LCLCTL_PWRSW_DELAY);
2815 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2816 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2817 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2818 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2819 GRC_LCLCTRL_GPIO_OE1 |
2820 GRC_LCLCTRL_GPIO_OE2 |
2821 GRC_LCLCTRL_GPIO_OUTPUT0 |
2822 GRC_LCLCTRL_GPIO_OUTPUT1 |
2823 tp->grc_local_ctrl;
2824 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2825 TG3_GRC_LCLCTL_PWRSW_DELAY);
2826
2827 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2828 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2829 TG3_GRC_LCLCTL_PWRSW_DELAY);
2830
2831 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2832 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2833 TG3_GRC_LCLCTL_PWRSW_DELAY);
2834 } else {
2835 u32 no_gpio2;
2836 u32 grc_local_ctrl = 0;
2837
2838 /* Workaround to prevent drawing too much current. */
2839 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2840 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2841 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2842 grc_local_ctrl,
2843 TG3_GRC_LCLCTL_PWRSW_DELAY);
2844 }
2845
2846 /* On 5753 and variants, GPIO2 cannot be used. */
2847 no_gpio2 = tp->nic_sram_data_cfg &
2848 NIC_SRAM_DATA_CFG_NO_GPIO2;
2849
2850 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2851 GRC_LCLCTRL_GPIO_OE1 |
2852 GRC_LCLCTRL_GPIO_OE2 |
2853 GRC_LCLCTRL_GPIO_OUTPUT1 |
2854 GRC_LCLCTRL_GPIO_OUTPUT2;
2855 if (no_gpio2) {
2856 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2857 GRC_LCLCTRL_GPIO_OUTPUT2);
2858 }
2859 tw32_wait_f(GRC_LOCAL_CTRL,
2860 tp->grc_local_ctrl | grc_local_ctrl,
2861 TG3_GRC_LCLCTL_PWRSW_DELAY);
2862
2863 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2864
2865 tw32_wait_f(GRC_LOCAL_CTRL,
2866 tp->grc_local_ctrl | grc_local_ctrl,
2867 TG3_GRC_LCLCTL_PWRSW_DELAY);
2868
2869 if (!no_gpio2) {
2870 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2871 tw32_wait_f(GRC_LOCAL_CTRL,
2872 tp->grc_local_ctrl | grc_local_ctrl,
2873 TG3_GRC_LCLCTL_PWRSW_DELAY);
2874 }
2875 }
2876 }
2877
2878 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2879 {
2880 u32 msg = 0;
2881
2882 /* Serialize power state transitions */
2883 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2884 return;
2885
2886 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2887 msg = TG3_GPIO_MSG_NEED_VAUX;
2888
2889 msg = tg3_set_function_status(tp, msg);
2890
2891 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2892 goto done;
2893
2894 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2895 tg3_pwrsrc_switch_to_vaux(tp);
2896 else
2897 tg3_pwrsrc_die_with_vmain(tp);
2898
2899 done:
2900 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2901 }
2902
2903 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2904 {
2905 bool need_vaux = false;
2906
2907 /* The GPIOs do something completely different on 57765. */
2908 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2909 return;
2910
2911 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2912 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2913 tg3_asic_rev(tp) == ASIC_REV_5720) {
2914 tg3_frob_aux_power_5717(tp, include_wol ?
2915 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2916 return;
2917 }
2918
2919 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2920 struct net_device *dev_peer;
2921
2922 dev_peer = pci_get_drvdata(tp->pdev_peer);
2923
2924 /* remove_one() may have been run on the peer. */
2925 if (dev_peer) {
2926 struct tg3 *tp_peer = netdev_priv(dev_peer);
2927
2928 if (tg3_flag(tp_peer, INIT_COMPLETE))
2929 return;
2930
2931 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2932 tg3_flag(tp_peer, ENABLE_ASF))
2933 need_vaux = true;
2934 }
2935 }
2936
2937 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2938 tg3_flag(tp, ENABLE_ASF))
2939 need_vaux = true;
2940
2941 if (need_vaux)
2942 tg3_pwrsrc_switch_to_vaux(tp);
2943 else
2944 tg3_pwrsrc_die_with_vmain(tp);
2945 }
2946
2947 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2948 {
2949 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2950 return 1;
2951 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2952 if (speed != SPEED_10)
2953 return 1;
2954 } else if (speed == SPEED_10)
2955 return 1;
2956
2957 return 0;
2958 }
2959
2960 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2961 {
2962 u32 val;
2963
2964 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
2965 return;
2966
2967 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2968 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
2969 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2970 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2971
2972 sg_dig_ctrl |=
2973 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2974 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2975 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2976 }
2977 return;
2978 }
2979
2980 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2981 tg3_bmcr_reset(tp);
2982 val = tr32(GRC_MISC_CFG);
2983 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2984 udelay(40);
2985 return;
2986 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2987 u32 phytest;
2988 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2989 u32 phy;
2990
2991 tg3_writephy(tp, MII_ADVERTISE, 0);
2992 tg3_writephy(tp, MII_BMCR,
2993 BMCR_ANENABLE | BMCR_ANRESTART);
2994
2995 tg3_writephy(tp, MII_TG3_FET_TEST,
2996 phytest | MII_TG3_FET_SHADOW_EN);
2997 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2998 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2999 tg3_writephy(tp,
3000 MII_TG3_FET_SHDW_AUXMODE4,
3001 phy);
3002 }
3003 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3004 }
3005 return;
3006 } else if (do_low_power) {
3007 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3008 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3009
3010 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3011 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3012 MII_TG3_AUXCTL_PCTL_VREG_11V;
3013 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3014 }
3015
3016 /* On some chips, the PHY should not be powered down because of
3017 * hardware bugs.
3018 */
3019 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
3020 tg3_asic_rev(tp) == ASIC_REV_5704 ||
3021 (tg3_asic_rev(tp) == ASIC_REV_5780 &&
3022 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
3023 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
3024 !tp->pci_fn))
3025 return;
3026
3027 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3028 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3029 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3030 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3031 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3032 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3033 }
3034
3035 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3036 }
3037
3038 /* tp->lock is held. */
3039 static int tg3_nvram_lock(struct tg3 *tp)
3040 {
3041 if (tg3_flag(tp, NVRAM)) {
3042 int i;
3043
3044 if (tp->nvram_lock_cnt == 0) {
3045 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3046 for (i = 0; i < 8000; i++) {
3047 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3048 break;
3049 udelay(20);
3050 }
3051 if (i == 8000) {
3052 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3053 return -ENODEV;
3054 }
3055 }
3056 tp->nvram_lock_cnt++;
3057 }
3058 return 0;
3059 }
3060
3061 /* tp->lock is held. */
3062 static void tg3_nvram_unlock(struct tg3 *tp)
3063 {
3064 if (tg3_flag(tp, NVRAM)) {
3065 if (tp->nvram_lock_cnt > 0)
3066 tp->nvram_lock_cnt--;
3067 if (tp->nvram_lock_cnt == 0)
3068 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3069 }
3070 }
3071
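/* Illustrative sketch, not part of the upstream driver: the NVRAM
 * arbitration lock above is reference counted, so nested helpers may
 * call tg3_nvram_lock() again without re-requesting the hardware
 * arbiter; only the final tg3_nvram_unlock() releases SWARB. As with
 * the functions above, tp->lock must be held.
 */
static inline int tg3_example_nvram_section(struct tg3 *tp)
{
        int ret = tg3_nvram_lock(tp);   /* outermost call grabs SWARB */

        if (ret)
                return ret;
        /* ... NVRAM accesses, possibly via helpers that take and
         * release the lock again ...
         */
        tg3_nvram_unlock(tp);           /* last unlock releases SWARB */
        return 0;
}
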
3072 /* tp->lock is held. */
3073 static void tg3_enable_nvram_access(struct tg3 *tp)
3074 {
3075 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3076 u32 nvaccess = tr32(NVRAM_ACCESS);
3077
3078 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3079 }
3080 }
3081
3082 /* tp->lock is held. */
3083 static void tg3_disable_nvram_access(struct tg3 *tp)
3084 {
3085 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3086 u32 nvaccess = tr32(NVRAM_ACCESS);
3087
3088 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3089 }
3090 }
3091
3092 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3093 u32 offset, u32 *val)
3094 {
3095 u32 tmp;
3096 int i;
3097
3098 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3099 return -EINVAL;
3100
3101 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3102 EEPROM_ADDR_DEVID_MASK |
3103 EEPROM_ADDR_READ);
3104 tw32(GRC_EEPROM_ADDR,
3105 tmp |
3106 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3107 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3108 EEPROM_ADDR_ADDR_MASK) |
3109 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3110
3111 for (i = 0; i < 1000; i++) {
3112 tmp = tr32(GRC_EEPROM_ADDR);
3113
3114 if (tmp & EEPROM_ADDR_COMPLETE)
3115 break;
3116 msleep(1);
3117 }
3118 if (!(tmp & EEPROM_ADDR_COMPLETE))
3119 return -EBUSY;
3120
3121 tmp = tr32(GRC_EEPROM_DATA);
3122
3123 /*
3124 * The data will always be opposite the native endian
3125 * format. Perform a blind byteswap to compensate.
3126 */
3127 *val = swab32(tmp);
3128
3129 return 0;
3130 }
3131
3132 #define NVRAM_CMD_TIMEOUT 10000
3133
3134 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3135 {
3136 int i;
3137
3138 tw32(NVRAM_CMD, nvram_cmd);
3139 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3140 udelay(10);
3141 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3142 udelay(10);
3143 break;
3144 }
3145 }
3146
3147 if (i == NVRAM_CMD_TIMEOUT)
3148 return -EBUSY;
3149
3150 return 0;
3151 }
3152
3153 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3154 {
3155 if (tg3_flag(tp, NVRAM) &&
3156 tg3_flag(tp, NVRAM_BUFFERED) &&
3157 tg3_flag(tp, FLASH) &&
3158 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3159 (tp->nvram_jedecnum == JEDEC_ATMEL))
3160
3161 addr = ((addr / tp->nvram_pagesize) <<
3162 ATMEL_AT45DB0X1B_PAGE_POS) +
3163 (addr % tp->nvram_pagesize);
3164
3165 return addr;
3166 }
3167
3168 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3169 {
3170 if (tg3_flag(tp, NVRAM) &&
3171 tg3_flag(tp, NVRAM_BUFFERED) &&
3172 tg3_flag(tp, FLASH) &&
3173 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3174 (tp->nvram_jedecnum == JEDEC_ATMEL))
3175
3176 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3177 tp->nvram_pagesize) +
3178 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3179
3180 return addr;
3181 }
3182
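/* Illustrative worked example, not part of the upstream driver,
 * assuming a 264-byte nvram_pagesize and an ATMEL_AT45DB0X1B_PAGE_POS
 * of 9: linear address 1000 falls in page 3 (1000 / 264) at offset
 * 208 (1000 % 264), so tg3_nvram_phys_addr() maps it to
 * (3 << 9) + 208 = 1744. tg3_nvram_logical_addr() inverts this:
 * (1744 >> 9) * 264 + (1744 & 511) = 792 + 208 = 1000.
 */
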
3183 /* NOTE: Data read in from NVRAM is byteswapped according to
3184 * the byteswapping settings for all other register accesses.
3185 * tg3 devices are BE devices, so on a BE machine, the data
3186 * returned will be exactly as it is seen in NVRAM. On a LE
3187 * machine, the 32-bit value will be byteswapped.
3188 */
3189 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3190 {
3191 int ret;
3192
3193 if (!tg3_flag(tp, NVRAM))
3194 return tg3_nvram_read_using_eeprom(tp, offset, val);
3195
3196 offset = tg3_nvram_phys_addr(tp, offset);
3197
3198 if (offset > NVRAM_ADDR_MSK)
3199 return -EINVAL;
3200
3201 ret = tg3_nvram_lock(tp);
3202 if (ret)
3203 return ret;
3204
3205 tg3_enable_nvram_access(tp);
3206
3207 tw32(NVRAM_ADDR, offset);
3208 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3209 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3210
3211 if (ret == 0)
3212 *val = tr32(NVRAM_RDDATA);
3213
3214 tg3_disable_nvram_access(tp);
3215
3216 tg3_nvram_unlock(tp);
3217
3218 return ret;
3219 }
3220
3221 /* Ensures NVRAM data is in bytestream format. */
3222 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3223 {
3224 u32 v;
3225 int res = tg3_nvram_read(tp, offset, &v);
3226 if (!res)
3227 *val = cpu_to_be32(v);
3228 return res;
3229 }
3230
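/* Illustrative sketch, not part of the upstream driver: why the
 * cpu_to_be32() above yields a bytestream. tg3_nvram_read() returns
 * the word in native CPU order, so on a little-endian host the bytes
 * are reversed relative to NVRAM. Storing the value as big-endian
 * restores the on-chip byte order, making a plain memcpy() of the
 * result correct on any host.
 */
static inline void tg3_example_nvram_bytestream(u32 native, u8 *out)
{
        __be32 stream = cpu_to_be32(native);    /* NVRAM byte 0 first */

        memcpy(out, &stream, sizeof(stream));
}
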
3231 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3232 u32 offset, u32 len, u8 *buf)
3233 {
3234 int i, j, rc = 0;
3235 u32 val;
3236
3237 for (i = 0; i < len; i += 4) {
3238 u32 addr;
3239 __be32 data;
3240
3241 addr = offset + i;
3242
3243 memcpy(&data, buf + i, 4);
3244
3245 /*
3246 * The SEEPROM interface expects the data to always be opposite
3247 * the native endian format. We accomplish this by reversing
3248 * all the operations that would have been performed on the
3249 * data from a call to tg3_nvram_read_be32().
3250 */
3251 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3252
3253 val = tr32(GRC_EEPROM_ADDR);
3254 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3255
3256 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3257 EEPROM_ADDR_READ);
3258 tw32(GRC_EEPROM_ADDR, val |
3259 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3260 (addr & EEPROM_ADDR_ADDR_MASK) |
3261 EEPROM_ADDR_START |
3262 EEPROM_ADDR_WRITE);
3263
3264 for (j = 0; j < 1000; j++) {
3265 val = tr32(GRC_EEPROM_ADDR);
3266
3267 if (val & EEPROM_ADDR_COMPLETE)
3268 break;
3269 msleep(1);
3270 }
3271 if (!(val & EEPROM_ADDR_COMPLETE)) {
3272 rc = -EBUSY;
3273 break;
3274 }
3275 }
3276
3277 return rc;
3278 }
3279
3280 /* offset and length are dword aligned */
3281 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3282 u8 *buf)
3283 {
3284 int ret = 0;
3285 u32 pagesize = tp->nvram_pagesize;
3286 u32 pagemask = pagesize - 1;
3287 u32 nvram_cmd;
3288 u8 *tmp;
3289
3290 tmp = kmalloc(pagesize, GFP_KERNEL);
3291 if (tmp == NULL)
3292 return -ENOMEM;
3293
3294 while (len) {
3295 int j;
3296 u32 phy_addr, page_off, size;
3297
3298 phy_addr = offset & ~pagemask;
3299
3300 for (j = 0; j < pagesize; j += 4) {
3301 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3302 (__be32 *) (tmp + j));
3303 if (ret)
3304 break;
3305 }
3306 if (ret)
3307 break;
3308
3309 page_off = offset & pagemask;
3310 size = pagesize;
3311 if (len < size)
3312 size = len;
3313
3314 len -= size;
3315
3316 memcpy(tmp + page_off, buf, size);
3317
3318 offset = offset + (pagesize - page_off);
3319
3320 tg3_enable_nvram_access(tp);
3321
3322 /*
3323 * Before we can erase the flash page, we need
3324 * to issue a special "write enable" command.
3325 */
3326 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3327
3328 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3329 break;
3330
3331 /* Erase the target page */
3332 tw32(NVRAM_ADDR, phy_addr);
3333
3334 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3335 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3336
3337 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3338 break;
3339
3340 /* Issue another write enable to start the write. */
3341 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3342
3343 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3344 break;
3345
3346 for (j = 0; j < pagesize; j += 4) {
3347 __be32 data;
3348
3349 data = *((__be32 *) (tmp + j));
3350
3351 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3352
3353 tw32(NVRAM_ADDR, phy_addr + j);
3354
3355 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3356 NVRAM_CMD_WR;
3357
3358 if (j == 0)
3359 nvram_cmd |= NVRAM_CMD_FIRST;
3360 else if (j == (pagesize - 4))
3361 nvram_cmd |= NVRAM_CMD_LAST;
3362
3363 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3364 if (ret)
3365 break;
3366 }
3367 if (ret)
3368 break;
3369 }
3370
3371 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3372 tg3_nvram_exec_cmd(tp, nvram_cmd);
3373
3374 kfree(tmp);
3375
3376 return ret;
3377 }
3378
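/* Illustrative worked example, not part of the upstream driver: the
 * unbuffered path above is a page-granular read-modify-write.
 * Assuming a 256-byte page, a 16-byte write at offset 0x108 reads
 * the whole page at phy_addr = 0x108 & ~0xff = 0x100 into tmp,
 * patches tmp at page_off = 8 for 16 bytes, erases the flash page,
 * and then programs the merged page back one dword at a time.
 */
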
3379 /* offset and length are dword aligned */
3380 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3381 u8 *buf)
3382 {
3383 int i, ret = 0;
3384
3385 for (i = 0; i < len; i += 4, offset += 4) {
3386 u32 page_off, phy_addr, nvram_cmd;
3387 __be32 data;
3388
3389 memcpy(&data, buf + i, 4);
3390 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3391
3392 page_off = offset % tp->nvram_pagesize;
3393
3394 phy_addr = tg3_nvram_phys_addr(tp, offset);
3395
3396 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3397
3398 if (page_off == 0 || i == 0)
3399 nvram_cmd |= NVRAM_CMD_FIRST;
3400 if (page_off == (tp->nvram_pagesize - 4))
3401 nvram_cmd |= NVRAM_CMD_LAST;
3402
3403 if (i == (len - 4))
3404 nvram_cmd |= NVRAM_CMD_LAST;
3405
3406 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3407 !tg3_flag(tp, FLASH) ||
3408 !tg3_flag(tp, 57765_PLUS))
3409 tw32(NVRAM_ADDR, phy_addr);
3410
3411 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3412 !tg3_flag(tp, 5755_PLUS) &&
3413 (tp->nvram_jedecnum == JEDEC_ST) &&
3414 (nvram_cmd & NVRAM_CMD_FIRST)) {
3415 u32 cmd;
3416
3417 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3418 ret = tg3_nvram_exec_cmd(tp, cmd);
3419 if (ret)
3420 break;
3421 }
3422 if (!tg3_flag(tp, FLASH)) {
3423 /* We always do complete word writes to eeprom. */
3424 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3425 }
3426
3427 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3428 if (ret)
3429 break;
3430 }
3431 return ret;
3432 }
3433
3434 /* offset and length are dword aligned */
3435 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3436 {
3437 int ret;
3438
3439 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3440 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3441 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3442 udelay(40);
3443 }
3444
3445 if (!tg3_flag(tp, NVRAM)) {
3446 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3447 } else {
3448 u32 grc_mode;
3449
3450 ret = tg3_nvram_lock(tp);
3451 if (ret)
3452 return ret;
3453
3454 tg3_enable_nvram_access(tp);
3455 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3456 tw32(NVRAM_WRITE1, 0x406);
3457
3458 grc_mode = tr32(GRC_MODE);
3459 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3460
3461 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3462 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3463 buf);
3464 } else {
3465 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3466 buf);
3467 }
3468
3469 grc_mode = tr32(GRC_MODE);
3470 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3471
3472 tg3_disable_nvram_access(tp);
3473 tg3_nvram_unlock(tp);
3474 }
3475
3476 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3477 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3478 udelay(40);
3479 }
3480
3481 return ret;
3482 }
3483
3484 #define RX_CPU_SCRATCH_BASE 0x30000
3485 #define RX_CPU_SCRATCH_SIZE 0x04000
3486 #define TX_CPU_SCRATCH_BASE 0x34000
3487 #define TX_CPU_SCRATCH_SIZE 0x04000
3488
3489 /* tp->lock is held. */
3490 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3491 {
3492 int i;
3493 const int iters = 10000;
3494
3495 for (i = 0; i < iters; i++) {
3496 tw32(cpu_base + CPU_STATE, 0xffffffff);
3497 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3498 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3499 break;
3500 }
3501
3502 return (i == iters) ? -EBUSY : 0;
3503 }
3504
3505 /* tp->lock is held. */
3506 static int tg3_rxcpu_pause(struct tg3 *tp)
3507 {
3508 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3509
3510 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3511 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3512 udelay(10);
3513
3514 return rc;
3515 }
3516
3517 /* tp->lock is held. */
3518 static int tg3_txcpu_pause(struct tg3 *tp)
3519 {
3520 return tg3_pause_cpu(tp, TX_CPU_BASE);
3521 }
3522
3523 /* tp->lock is held. */
3524 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3525 {
3526 tw32(cpu_base + CPU_STATE, 0xffffffff);
3527 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3528 }
3529
3530 /* tp->lock is held. */
3531 static void tg3_rxcpu_resume(struct tg3 *tp)
3532 {
3533 tg3_resume_cpu(tp, RX_CPU_BASE);
3534 }
3535
3536 /* tp->lock is held. */
3537 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3538 {
3539 int rc;
3540
3541 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3542
3543 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3544 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3545
3546 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3547 return 0;
3548 }
3549 if (cpu_base == RX_CPU_BASE) {
3550 rc = tg3_rxcpu_pause(tp);
3551 } else {
3552 /*
3553 * There is only an Rx CPU for the 5750 derivative in the
3554 * BCM4785.
3555 */
3556 if (tg3_flag(tp, IS_SSB_CORE))
3557 return 0;
3558
3559 rc = tg3_txcpu_pause(tp);
3560 }
3561
3562 if (rc) {
3563 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3564 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3565 return -ENODEV;
3566 }
3567
3568 /* Clear firmware's nvram arbitration. */
3569 if (tg3_flag(tp, NVRAM))
3570 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3571 return 0;
3572 }
3573
3574 static int tg3_fw_data_len(struct tg3 *tp,
3575 const struct tg3_firmware_hdr *fw_hdr)
3576 {
3577 int fw_len;
3578
3579 /* Non-fragmented firmware has one firmware header followed by a
3580 * contiguous chunk of data to be written. The length field in that
3581 * header is not the length of the data to be written but the
3582 * complete length of the bss. The data length is determined from
3583 * tp->fw->size minus the headers.
3584 *
3585 * Fragmented firmware has a main header followed by multiple
3586 * fragments. Each fragment is identical to non-fragmented firmware:
3587 * a firmware header followed by a contiguous chunk of data. In
3588 * the main header, the length field is unused and set to 0xffffffff.
3589 * In each fragment header the length is the entire size of that
3590 * fragment, i.e. fragment data plus header length. The data length
3591 * is therefore the length field in the header minus TG3_FW_HDR_LEN.
3592 */
3593 if (tp->fw_len == 0xffffffff)
3594 fw_len = be32_to_cpu(fw_hdr->len);
3595 else
3596 fw_len = tp->fw->size;
3597
3598 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3599 }
3600
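/* Illustrative worked example, not part of the upstream driver, with
 * assumed sizes: for non-fragmented firmware with tp->fw->size =
 * 0x1000, tg3_fw_data_len() returns (0x1000 - TG3_FW_HDR_LEN) / 4
 * words. For fragmented firmware (tp->fw_len == 0xffffffff), a
 * fragment header reporting len = 0x420 yields
 * (0x420 - TG3_FW_HDR_LEN) / 4 words of fragment data.
 */
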
3601 /* tp->lock is held. */
3602 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3603 u32 cpu_scratch_base, int cpu_scratch_size,
3604 const struct tg3_firmware_hdr *fw_hdr)
3605 {
3606 int err, i;
3607 void (*write_op)(struct tg3 *, u32, u32);
3608 int total_len = tp->fw->size;
3609
3610 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3611 netdev_err(tp->dev,
3612 "%s: Trying to load TX cpu firmware which is 5705\n",
3613 __func__);
3614 return -EINVAL;
3615 }
3616
3617 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3618 write_op = tg3_write_mem;
3619 else
3620 write_op = tg3_write_indirect_reg32;
3621
3622 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3623 /* It is possible that the bootcode is still loading at this
3624 * point. Take the NVRAM lock before halting the CPU.
3625 */
3626 int lock_err = tg3_nvram_lock(tp);
3627 err = tg3_halt_cpu(tp, cpu_base);
3628 if (!lock_err)
3629 tg3_nvram_unlock(tp);
3630 if (err)
3631 goto out;
3632
3633 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3634 write_op(tp, cpu_scratch_base + i, 0);
3635 tw32(cpu_base + CPU_STATE, 0xffffffff);
3636 tw32(cpu_base + CPU_MODE,
3637 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3638 } else {
3639 /* Subtract additional main header for fragmented firmware and
3640 * advance to the first fragment
3641 */
3642 total_len -= TG3_FW_HDR_LEN;
3643 fw_hdr++;
3644 }
3645
3646 do {
3647 u32 *fw_data = (u32 *)(fw_hdr + 1);
3648 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3649 write_op(tp, cpu_scratch_base +
3650 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3651 (i * sizeof(u32)),
3652 be32_to_cpu(fw_data[i]));
3653
3654 total_len -= be32_to_cpu(fw_hdr->len);
3655
3656 /* Advance to next fragment */
3657 fw_hdr = (struct tg3_firmware_hdr *)
3658 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3659 } while (total_len > 0);
3660
3661 err = 0;
3662
3663 out:
3664 return err;
3665 }
3666
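/* Illustrative sketch, not part of the upstream driver: the fragment
 * walk used above. A fragment header's len field covers the header
 * plus its payload, so advancing by be32_to_cpu(fw_hdr->len) bytes
 * lands on the next fragment header, until total_len is consumed.
 */
static inline const struct tg3_firmware_hdr *
tg3_example_next_fragment(const struct tg3_firmware_hdr *fw_hdr)
{
        return (const struct tg3_firmware_hdr *)
                ((const void *)fw_hdr + be32_to_cpu(fw_hdr->len));
}
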
3667 /* tp->lock is held. */
3668 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3669 {
3670 int i;
3671 const int iters = 5;
3672
3673 tw32(cpu_base + CPU_STATE, 0xffffffff);
3674 tw32_f(cpu_base + CPU_PC, pc);
3675
3676 for (i = 0; i < iters; i++) {
3677 if (tr32(cpu_base + CPU_PC) == pc)
3678 break;
3679 tw32(cpu_base + CPU_STATE, 0xffffffff);
3680 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3681 tw32_f(cpu_base + CPU_PC, pc);
3682 udelay(1000);
3683 }
3684
3685 return (i == iters) ? -EBUSY : 0;
3686 }
3687
3688 /* tp->lock is held. */
3689 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3690 {
3691 const struct tg3_firmware_hdr *fw_hdr;
3692 int err;
3693
3694 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3695
3696 /* The firmware blob starts with version numbers, followed by
3697 * the start address and length. The length field holds the
3698 * complete length: end_address_of_bss - start_address_of_text.
3699 * The remainder is the blob to be loaded contiguously
3700 * from the start address. */
3701
3702 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3703 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3704 fw_hdr);
3705 if (err)
3706 return err;
3707
3708 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3709 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3710 fw_hdr);
3711 if (err)
3712 return err;
3713
3714 /* Now start up only the RX CPU. */
3715 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3716 be32_to_cpu(fw_hdr->base_addr));
3717 if (err) {
3718 netdev_err(tp->dev, "%s failed to set RX CPU PC, is %08x, "
3719 "should be %08x\n", __func__,
3720 tr32(RX_CPU_BASE + CPU_PC),
3721 be32_to_cpu(fw_hdr->base_addr));
3722 return -ENODEV;
3723 }
3724
3725 tg3_rxcpu_resume(tp);
3726
3727 return 0;
3728 }
3729
3730 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3731 {
3732 const int iters = 1000;
3733 int i;
3734 u32 val;
3735
3736 /* Wait for the boot code to complete initialization and enter its
3737 * service loop. It is then safe to download service patches.
3738 */
3739 for (i = 0; i < iters; i++) {
3740 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3741 break;
3742
3743 udelay(10);
3744 }
3745
3746 if (i == iters) {
3747 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3748 return -EBUSY;
3749 }
3750
3751 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3752 if (val & 0xff) {
3753 netdev_warn(tp->dev,
3754 "Other patches exist. Not downloading EEE patch\n");
3755 return -EEXIST;
3756 }
3757
3758 return 0;
3759 }
3760
3761 /* tp->lock is held. */
3762 static void tg3_load_57766_firmware(struct tg3 *tp)
3763 {
3764 struct tg3_firmware_hdr *fw_hdr;
3765
3766 if (!tg3_flag(tp, NO_NVRAM))
3767 return;
3768
3769 if (tg3_validate_rxcpu_state(tp))
3770 return;
3771
3772 if (!tp->fw)
3773 return;
3774
3775 /* This firmware blob has a different format than older firmware
3776 * releases, as described below. The main difference is that the
3777 * data is fragmented and written to non-contiguous locations.
3778 *
3779 * At the beginning there is a firmware header identical to other
3780 * firmware, consisting of version, base address and length. The
3781 * length here is unused and set to 0xffffffff.
3782 *
3783 * This is followed by a series of firmware fragments, each
3784 * individually identical to previous firmware, i.e. a firmware
3785 * header followed by the data for that fragment. The version
3786 * field of each fragment header is unused.
3787 */
3788
3789 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3790 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3791 return;
3792
3793 if (tg3_rxcpu_pause(tp))
3794 return;
3795
3796 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3797 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3798
3799 tg3_rxcpu_resume(tp);
3800 }
3801
3802 /* tp->lock is held. */
3803 static int tg3_load_tso_firmware(struct tg3 *tp)
3804 {
3805 const struct tg3_firmware_hdr *fw_hdr;
3806 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3807 int err;
3808
3809 if (!tg3_flag(tp, FW_TSO))
3810 return 0;
3811
3812 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3813
3814 /* The firmware blob starts with version numbers, followed by
3815 * the start address and length. The length field holds the
3816 * complete length: end_address_of_bss - start_address_of_text.
3817 * The remainder is the blob to be loaded contiguously
3818 * from the start address. */
3819
3820 cpu_scratch_size = tp->fw_len;
3821
3822 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3823 cpu_base = RX_CPU_BASE;
3824 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3825 } else {
3826 cpu_base = TX_CPU_BASE;
3827 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3828 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3829 }
3830
3831 err = tg3_load_firmware_cpu(tp, cpu_base,
3832 cpu_scratch_base, cpu_scratch_size,
3833 fw_hdr);
3834 if (err)
3835 return err;
3836
3837 /* Now start up the CPU. */
3838 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3839 be32_to_cpu(fw_hdr->base_addr));
3840 if (err) {
3841 netdev_err(tp->dev,
3842 "%s fails to set CPU PC, is %08x should be %08x\n",
3843 __func__, tr32(cpu_base + CPU_PC),
3844 be32_to_cpu(fw_hdr->base_addr));
3845 return -ENODEV;
3846 }
3847
3848 tg3_resume_cpu(tp, cpu_base);
3849 return 0;
3850 }
3851
3852
3853 /* tp->lock is held. */
3854 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3855 {
3856 u32 addr_high, addr_low;
3857 int i;
3858
3859 addr_high = ((tp->dev->dev_addr[0] << 8) |
3860 tp->dev->dev_addr[1]);
3861 addr_low = ((tp->dev->dev_addr[2] << 24) |
3862 (tp->dev->dev_addr[3] << 16) |
3863 (tp->dev->dev_addr[4] << 8) |
3864 (tp->dev->dev_addr[5] << 0));
3865 for (i = 0; i < 4; i++) {
3866 if (i == 1 && skip_mac_1)
3867 continue;
3868 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3869 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3870 }
3871
3872 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3873 tg3_asic_rev(tp) == ASIC_REV_5704) {
3874 for (i = 0; i < 12; i++) {
3875 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3876 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3877 }
3878 }
3879
3880 addr_high = (tp->dev->dev_addr[0] +
3881 tp->dev->dev_addr[1] +
3882 tp->dev->dev_addr[2] +
3883 tp->dev->dev_addr[3] +
3884 tp->dev->dev_addr[4] +
3885 tp->dev->dev_addr[5]) &
3886 TX_BACKOFF_SEED_MASK;
3887 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3888 }
3889
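/* Illustrative sketch, not part of the upstream driver: the register
 * packing used above. For the example address 00:11:22:33:44:55, the
 * high register holds 0x00000011 (bytes 0-1) and the low register
 * holds 0x22334455 (bytes 2-5).
 */
static inline void tg3_example_pack_mac(const u8 *mac, u32 *hi, u32 *lo)
{
        *hi = (mac[0] << 8) | mac[1];
        *lo = (mac[2] << 24) | (mac[3] << 16) |
              (mac[4] << 8) | mac[5];
}
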
3890 static void tg3_enable_register_access(struct tg3 *tp)
3891 {
3892 /*
3893 * Make sure register accesses (indirect or otherwise) will function
3894 * correctly.
3895 */
3896 pci_write_config_dword(tp->pdev,
3897 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3898 }
3899
3900 static int tg3_power_up(struct tg3 *tp)
3901 {
3902 int err;
3903
3904 tg3_enable_register_access(tp);
3905
3906 err = pci_set_power_state(tp->pdev, PCI_D0);
3907 if (!err) {
3908 /* Switch out of Vaux if it is a NIC */
3909 tg3_pwrsrc_switch_to_vmain(tp);
3910 } else {
3911 netdev_err(tp->dev, "Transition to D0 failed\n");
3912 }
3913
3914 return err;
3915 }
3916
3917 static int tg3_setup_phy(struct tg3 *, bool);
3918
3919 static int tg3_power_down_prepare(struct tg3 *tp)
3920 {
3921 u32 misc_host_ctrl;
3922 bool device_should_wake, do_low_power;
3923
3924 tg3_enable_register_access(tp);
3925
3926 /* Restore the CLKREQ setting. */
3927 if (tg3_flag(tp, CLKREQ_BUG))
3928 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3929 PCI_EXP_LNKCTL_CLKREQ_EN);
3930
3931 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3932 tw32(TG3PCI_MISC_HOST_CTRL,
3933 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3934
3935 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3936 tg3_flag(tp, WOL_ENABLE);
3937
3938 if (tg3_flag(tp, USE_PHYLIB)) {
3939 do_low_power = false;
3940 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3941 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3942 struct phy_device *phydev;
3943 u32 phyid, advertising;
3944
3945 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3946
3947 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3948
3949 tp->link_config.speed = phydev->speed;
3950 tp->link_config.duplex = phydev->duplex;
3951 tp->link_config.autoneg = phydev->autoneg;
3952 tp->link_config.advertising = phydev->advertising;
3953
3954 advertising = ADVERTISED_TP |
3955 ADVERTISED_Pause |
3956 ADVERTISED_Autoneg |
3957 ADVERTISED_10baseT_Half;
3958
3959 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3960 if (tg3_flag(tp, WOL_SPEED_100MB))
3961 advertising |=
3962 ADVERTISED_100baseT_Half |
3963 ADVERTISED_100baseT_Full |
3964 ADVERTISED_10baseT_Full;
3965 else
3966 advertising |= ADVERTISED_10baseT_Full;
3967 }
3968
3969 phydev->advertising = advertising;
3970
3971 phy_start_aneg(phydev);
3972
3973 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3974 if (phyid != PHY_ID_BCMAC131) {
3975 phyid &= PHY_BCM_OUI_MASK;
3976 if (phyid == PHY_BCM_OUI_1 ||
3977 phyid == PHY_BCM_OUI_2 ||
3978 phyid == PHY_BCM_OUI_3)
3979 do_low_power = true;
3980 }
3981 }
3982 } else {
3983 do_low_power = true;
3984
3985 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3986 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3987
3988 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3989 tg3_setup_phy(tp, false);
3990 }
3991
3992 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3993 u32 val;
3994
3995 val = tr32(GRC_VCPU_EXT_CTRL);
3996 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3997 } else if (!tg3_flag(tp, ENABLE_ASF)) {
3998 int i;
3999 u32 val;
4000
4001 for (i = 0; i < 200; i++) {
4002 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4003 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4004 break;
4005 msleep(1);
4006 }
4007 }
4008 if (tg3_flag(tp, WOL_CAP))
4009 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4010 WOL_DRV_STATE_SHUTDOWN |
4011 WOL_DRV_WOL |
4012 WOL_SET_MAGIC_PKT);
4013
4014 if (device_should_wake) {
4015 u32 mac_mode;
4016
4017 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4018 if (do_low_power &&
4019 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4020 tg3_phy_auxctl_write(tp,
4021 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4022 MII_TG3_AUXCTL_PCTL_WOL_EN |
4023 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4024 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4025 udelay(40);
4026 }
4027
4028 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4029 mac_mode = MAC_MODE_PORT_MODE_GMII;
4030 else if (tp->phy_flags &
4031 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4032 if (tp->link_config.active_speed == SPEED_1000)
4033 mac_mode = MAC_MODE_PORT_MODE_GMII;
4034 else
4035 mac_mode = MAC_MODE_PORT_MODE_MII;
4036 } else
4037 mac_mode = MAC_MODE_PORT_MODE_MII;
4038
4039 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4040 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4041 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4042 SPEED_100 : SPEED_10;
4043 if (tg3_5700_link_polarity(tp, speed))
4044 mac_mode |= MAC_MODE_LINK_POLARITY;
4045 else
4046 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4047 }
4048 } else {
4049 mac_mode = MAC_MODE_PORT_MODE_TBI;
4050 }
4051
4052 if (!tg3_flag(tp, 5750_PLUS))
4053 tw32(MAC_LED_CTRL, tp->led_ctrl);
4054
4055 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4056 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4057 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4058 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4059
4060 if (tg3_flag(tp, ENABLE_APE))
4061 mac_mode |= MAC_MODE_APE_TX_EN |
4062 MAC_MODE_APE_RX_EN |
4063 MAC_MODE_TDE_ENABLE;
4064
4065 tw32_f(MAC_MODE, mac_mode);
4066 udelay(100);
4067
4068 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4069 udelay(10);
4070 }
4071
4072 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4073 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4074 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4075 u32 base_val;
4076
4077 base_val = tp->pci_clock_ctrl;
4078 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4079 CLOCK_CTRL_TXCLK_DISABLE);
4080
4081 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4082 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4083 } else if (tg3_flag(tp, 5780_CLASS) ||
4084 tg3_flag(tp, CPMU_PRESENT) ||
4085 tg3_asic_rev(tp) == ASIC_REV_5906) {
4086 /* do nothing */
4087 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4088 u32 newbits1, newbits2;
4089
4090 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4091 tg3_asic_rev(tp) == ASIC_REV_5701) {
4092 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4093 CLOCK_CTRL_TXCLK_DISABLE |
4094 CLOCK_CTRL_ALTCLK);
4095 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4096 } else if (tg3_flag(tp, 5705_PLUS)) {
4097 newbits1 = CLOCK_CTRL_625_CORE;
4098 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4099 } else {
4100 newbits1 = CLOCK_CTRL_ALTCLK;
4101 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4102 }
4103
4104 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4105 40);
4106
4107 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4108 40);
4109
4110 if (!tg3_flag(tp, 5705_PLUS)) {
4111 u32 newbits3;
4112
4113 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4114 tg3_asic_rev(tp) == ASIC_REV_5701) {
4115 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4116 CLOCK_CTRL_TXCLK_DISABLE |
4117 CLOCK_CTRL_44MHZ_CORE);
4118 } else {
4119 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4120 }
4121
4122 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4123 tp->pci_clock_ctrl | newbits3, 40);
4124 }
4125 }
4126
4127 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4128 tg3_power_down_phy(tp, do_low_power);
4129
4130 tg3_frob_aux_power(tp, true);
4131
4132 /* Workaround for unstable PLL clock */
4133 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4134 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4135 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4136 u32 val = tr32(0x7d00);
4137
4138 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4139 tw32(0x7d00, val);
4140 if (!tg3_flag(tp, ENABLE_ASF)) {
4141 int err;
4142
4143 err = tg3_nvram_lock(tp);
4144 tg3_halt_cpu(tp, RX_CPU_BASE);
4145 if (!err)
4146 tg3_nvram_unlock(tp);
4147 }
4148 }
4149
4150 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4151
4152 return 0;
4153 }
4154
4155 static void tg3_power_down(struct tg3 *tp)
4156 {
4157 tg3_power_down_prepare(tp);
4158
4159 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4160 pci_set_power_state(tp->pdev, PCI_D3hot);
4161 }
4162
4163 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4164 {
4165 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4166 case MII_TG3_AUX_STAT_10HALF:
4167 *speed = SPEED_10;
4168 *duplex = DUPLEX_HALF;
4169 break;
4170
4171 case MII_TG3_AUX_STAT_10FULL:
4172 *speed = SPEED_10;
4173 *duplex = DUPLEX_FULL;
4174 break;
4175
4176 case MII_TG3_AUX_STAT_100HALF:
4177 *speed = SPEED_100;
4178 *duplex = DUPLEX_HALF;
4179 break;
4180
4181 case MII_TG3_AUX_STAT_100FULL:
4182 *speed = SPEED_100;
4183 *duplex = DUPLEX_FULL;
4184 break;
4185
4186 case MII_TG3_AUX_STAT_1000HALF:
4187 *speed = SPEED_1000;
4188 *duplex = DUPLEX_HALF;
4189 break;
4190
4191 case MII_TG3_AUX_STAT_1000FULL:
4192 *speed = SPEED_1000;
4193 *duplex = DUPLEX_FULL;
4194 break;
4195
4196 default:
4197 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4198 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4199 SPEED_10;
4200 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4201 DUPLEX_HALF;
4202 break;
4203 }
4204 *speed = SPEED_UNKNOWN;
4205 *duplex = DUPLEX_UNKNOWN;
4206 break;
4207 }
4208 }
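
/* Usage sketch with hypothetical register values: a PHY reporting
 * MII_TG3_AUX_STAT_100FULL in the speed/duplex field decodes as
 *
 *   u16 speed; u8 duplex;
 *   tg3_aux_stat_to_speed_duplex(tp, aux_stat, &speed, &duplex);
 *   // speed == SPEED_100, duplex == DUPLEX_FULL
 *
 * FET-class PHYs encode speed and duplex in two independent bits
 * rather than one combined field, hence the separate decode in the
 * default case.
 */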
4209
4210 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4211 {
4212 int err = 0;
4213 u32 val, new_adv;
4214
4215 new_adv = ADVERTISE_CSMA;
4216 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4217 new_adv |= mii_advertise_flowctrl(flowctrl);
4218
4219 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4220 if (err)
4221 goto done;
4222
4223 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4224 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4225
4226 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4227 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4228 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4229
4230 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4231 if (err)
4232 goto done;
4233 }
4234
4235 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4236 goto done;
4237
4238 tw32(TG3_CPMU_EEE_MODE,
4239 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4240
4241 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4242 if (!err) {
4243 u32 err2;
4244
4245 val = 0;
4246 /* Advertise 100-BaseTX EEE ability */
4247 if (advertise & ADVERTISED_100baseT_Full)
4248 val |= MDIO_AN_EEE_ADV_100TX;
4249 /* Advertise 1000-BaseT EEE ability */
4250 if (advertise & ADVERTISED_1000baseT_Full)
4251 val |= MDIO_AN_EEE_ADV_1000T;
4252 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4253 if (err)
4254 val = 0;
4255
4256 switch (tg3_asic_rev(tp)) {
4257 case ASIC_REV_5717:
4258 case ASIC_REV_57765:
4259 case ASIC_REV_57766:
4260 case ASIC_REV_5719:
4261 /* If we advertised any EEE modes above... */
4262 if (val)
4263 val = MII_TG3_DSP_TAP26_ALNOKO |
4264 MII_TG3_DSP_TAP26_RMRXSTO |
4265 MII_TG3_DSP_TAP26_OPCSINPT;
4266 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4267 /* Fall through */
4268 case ASIC_REV_5720:
4269 case ASIC_REV_5762:
4270 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4271 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4272 MII_TG3_DSP_CH34TP2_HIBW01);
4273 }
4274
4275 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4276 if (!err)
4277 err = err2;
4278 }
4279
4280 done:
4281 return err;
4282 }
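
/* A minimal sketch of the flow-control encoding relied on above,
 * assuming the standard linux/mii.h mii_advertise_flowctrl() mapping:
 *
 *   FLOW_CTRL_RX only           -> ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM
 *   FLOW_CTRL_TX only           -> ADVERTISE_PAUSE_ASYM
 *   FLOW_CTRL_TX | FLOW_CTRL_RX -> ADVERTISE_PAUSE_CAP
 *
 * So, e.g., advertise = ADVERTISED_100baseT_Full with flowctrl =
 * FLOW_CTRL_TX | FLOW_CTRL_RX yields MII_ADVERTISE =
 * ADVERTISE_CSMA | ADVERTISE_100FULL | ADVERTISE_PAUSE_CAP.
 */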
4283
4284 static void tg3_phy_copper_begin(struct tg3 *tp)
4285 {
4286 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4287 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4288 u32 adv, fc;
4289
4290 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4291 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4292 adv = ADVERTISED_10baseT_Half |
4293 ADVERTISED_10baseT_Full;
4294 if (tg3_flag(tp, WOL_SPEED_100MB))
4295 adv |= ADVERTISED_100baseT_Half |
4296 ADVERTISED_100baseT_Full;
4297 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
4298 adv |= ADVERTISED_1000baseT_Half |
4299 ADVERTISED_1000baseT_Full;
4300
4301 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4302 } else {
4303 adv = tp->link_config.advertising;
4304 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4305 adv &= ~(ADVERTISED_1000baseT_Half |
4306 ADVERTISED_1000baseT_Full);
4307
4308 fc = tp->link_config.flowctrl;
4309 }
4310
4311 tg3_phy_autoneg_cfg(tp, adv, fc);
4312
4313 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4314 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4315 /* Normally during power down we want to autonegotiate
4316 * the lowest possible speed for WOL. However, to avoid
4317 * link flap, we leave it untouched.
4318 */
4319 return;
4320 }
4321
4322 tg3_writephy(tp, MII_BMCR,
4323 BMCR_ANENABLE | BMCR_ANRESTART);
4324 } else {
4325 int i;
4326 u32 bmcr, orig_bmcr;
4327
4328 tp->link_config.active_speed = tp->link_config.speed;
4329 tp->link_config.active_duplex = tp->link_config.duplex;
4330
4331 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4332 /* With autoneg disabled, 5715 only links up when the
4333 * advertisement register has the configured speed
4334 * enabled.
4335 */
4336 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4337 }
4338
4339 bmcr = 0;
4340 switch (tp->link_config.speed) {
4341 default:
4342 case SPEED_10:
4343 break;
4344
4345 case SPEED_100:
4346 bmcr |= BMCR_SPEED100;
4347 break;
4348
4349 case SPEED_1000:
4350 bmcr |= BMCR_SPEED1000;
4351 break;
4352 }
4353
4354 if (tp->link_config.duplex == DUPLEX_FULL)
4355 bmcr |= BMCR_FULLDPLX;
4356
4357 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4358 (bmcr != orig_bmcr)) {
4359 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4360 for (i = 0; i < 1500; i++) {
4361 u32 tmp;
4362
4363 udelay(10);
4364 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4365 tg3_readphy(tp, MII_BMSR, &tmp))
4366 continue;
4367 if (!(tmp & BMSR_LSTATUS)) {
4368 udelay(40);
4369 break;
4370 }
4371 }
4372 tg3_writephy(tp, MII_BMCR, bmcr);
4373 udelay(40);
4374 }
4375 }
4376 }
4377
4378 static int tg3_phy_pull_config(struct tg3 *tp)
4379 {
4380 int err;
4381 u32 val;
4382
4383 err = tg3_readphy(tp, MII_BMCR, &val);
4384 if (err)
4385 goto done;
4386
4387 if (!(val & BMCR_ANENABLE)) {
4388 tp->link_config.autoneg = AUTONEG_DISABLE;
4389 tp->link_config.advertising = 0;
4390 tg3_flag_clear(tp, PAUSE_AUTONEG);
4391
4392 err = -EIO;
4393
4394 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4395 case 0:
4396 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4397 goto done;
4398
4399 tp->link_config.speed = SPEED_10;
4400 break;
4401 case BMCR_SPEED100:
4402 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4403 goto done;
4404
4405 tp->link_config.speed = SPEED_100;
4406 break;
4407 case BMCR_SPEED1000:
4408 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4409 tp->link_config.speed = SPEED_1000;
4410 break;
4411 }
4412 /* Fall through */
4413 default:
4414 goto done;
4415 }
4416
4417 if (val & BMCR_FULLDPLX)
4418 tp->link_config.duplex = DUPLEX_FULL;
4419 else
4420 tp->link_config.duplex = DUPLEX_HALF;
4421
4422 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4423
4424 err = 0;
4425 goto done;
4426 }
4427
4428 tp->link_config.autoneg = AUTONEG_ENABLE;
4429 tp->link_config.advertising = ADVERTISED_Autoneg;
4430 tg3_flag_set(tp, PAUSE_AUTONEG);
4431
4432 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4433 u32 adv;
4434
4435 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4436 if (err)
4437 goto done;
4438
4439 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4440 tp->link_config.advertising |= adv | ADVERTISED_TP;
4441
4442 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4443 } else {
4444 tp->link_config.advertising |= ADVERTISED_FIBRE;
4445 }
4446
4447 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4448 u32 adv;
4449
4450 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4451 err = tg3_readphy(tp, MII_CTRL1000, &val);
4452 if (err)
4453 goto done;
4454
4455 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4456 } else {
4457 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4458 if (err)
4459 goto done;
4460
4461 adv = tg3_decode_flowctrl_1000X(val);
4462 tp->link_config.flowctrl = adv;
4463
4464 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4465 adv = mii_adv_to_ethtool_adv_x(val);
4466 }
4467
4468 tp->link_config.advertising |= adv;
4469 }
4470
4471 done:
4472 return err;
4473 }
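
/* Decoding sketch for the forced-speed path above: BMCR carries two
 * speed bits, so
 *
 *   (val & (BMCR_SPEED1000 | BMCR_SPEED100)) == 0  -> 10 Mb/s
 *   (val & ...) == BMCR_SPEED100                   -> 100 Mb/s
 *   (val & ...) == BMCR_SPEED1000                  -> 1000 Mb/s
 *
 * with BMCR_FULLDPLX selecting full duplex. Flow control cannot be
 * negotiated in forced mode, so it is assumed on in both directions.
 */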
4474
4475 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4476 {
4477 int err;
4478
4479 /* Turn off tap power management. */
4480 /* Set Extended packet length bit */
4481 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4482
4483 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4484 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4485 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4486 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4487 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4488
4489 udelay(40);
4490
4491 return err;
4492 }
4493
4494 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4495 {
4496 u32 val;
4497 u32 tgtadv = 0;
4498 u32 advertising = tp->link_config.advertising;
4499
4500 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4501 return true;
4502
4503 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
4504 return false;
4505
4506 val &= (MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T);
4508
4509 if (advertising & ADVERTISED_100baseT_Full)
4510 tgtadv |= MDIO_AN_EEE_ADV_100TX;
4511 if (advertising & ADVERTISED_1000baseT_Full)
4512 tgtadv |= MDIO_AN_EEE_ADV_1000T;
4513
4514 if (val != tgtadv)
4515 return false;
4516
4517 return true;
4518 }
4519
4520 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4521 {
4522 u32 advmsk, tgtadv, advertising;
4523
4524 advertising = tp->link_config.advertising;
4525 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4526
4527 advmsk = ADVERTISE_ALL;
4528 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4529 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4530 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4531 }
4532
4533 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4534 return false;
4535
4536 if ((*lcladv & advmsk) != tgtadv)
4537 return false;
4538
4539 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4540 u32 tg3_ctrl;
4541
4542 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4543
4544 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4545 return false;
4546
4547 if (tgtadv &&
4548 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4549 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4550 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4551 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4552 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4553 } else {
4554 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4555 }
4556
4557 if (tg3_ctrl != tgtadv)
4558 return false;
4559 }
4560
4561 return true;
4562 }
4563
4564 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4565 {
4566 u32 lpeth = 0;
4567
4568 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4569 u32 val;
4570
4571 if (tg3_readphy(tp, MII_STAT1000, &val))
4572 return false;
4573
4574 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4575 }
4576
4577 if (tg3_readphy(tp, MII_LPA, rmtadv))
4578 return false;
4579
4580 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4581 tp->link_config.rmt_adv = lpeth;
4582
4583 return true;
4584 }
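
/* Sketch of the combination above: MII_STAT1000 reports the link
 * partner's 1000BASE-T abilities and MII_LPA the 10/100 and pause
 * ones, so the two are folded into one ethtool mask, e.g. (values
 * illustrative):
 *
 *   LPA_1000FULL                 -> ADVERTISED_1000baseT_Full
 *   LPA_100FULL | LPA_PAUSE_CAP  -> ADVERTISED_100baseT_Full |
 *                                   ADVERTISED_Pause
 */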
4585
4586 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4587 {
4588 if (curr_link_up != tp->link_up) {
4589 if (curr_link_up) {
4590 netif_carrier_on(tp->dev);
4591 } else {
4592 netif_carrier_off(tp->dev);
4593 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4594 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4595 }
4596
4597 tg3_link_report(tp);
4598 return true;
4599 }
4600
4601 return false;
4602 }
4603
4604 static void tg3_clear_mac_status(struct tg3 *tp)
4605 {
4606 tw32(MAC_EVENT, 0);
4607
4608 tw32_f(MAC_STATUS,
4609 MAC_STATUS_SYNC_CHANGED |
4610 MAC_STATUS_CFG_CHANGED |
4611 MAC_STATUS_MI_COMPLETION |
4612 MAC_STATUS_LNKSTATE_CHANGED);
4613 udelay(40);
4614 }
4615
4616 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4617 {
4618 bool current_link_up;
4619 u32 bmsr, val;
4620 u32 lcl_adv, rmt_adv;
4621 u16 current_speed;
4622 u8 current_duplex;
4623 int i, err;
4624
4625 tg3_clear_mac_status(tp);
4626
4627 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4628 tw32_f(MAC_MI_MODE,
4629 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4630 udelay(80);
4631 }
4632
4633 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4634
4635 /* Some third-party PHYs need to be reset on link going
4636 * down.
4637 */
4638 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4639 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4640 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4641 tp->link_up) {
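/* BMSR latches link-down events until read (standard MII
 * latched-low LSTATUS behavior), so it is read twice here to
 * obtain the current link state.
 */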
4642 tg3_readphy(tp, MII_BMSR, &bmsr);
4643 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4644 !(bmsr & BMSR_LSTATUS))
4645 force_reset = true;
4646 }
4647 if (force_reset)
4648 tg3_phy_reset(tp);
4649
4650 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4651 tg3_readphy(tp, MII_BMSR, &bmsr);
4652 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4653 !tg3_flag(tp, INIT_COMPLETE))
4654 bmsr = 0;
4655
4656 if (!(bmsr & BMSR_LSTATUS)) {
4657 err = tg3_init_5401phy_dsp(tp);
4658 if (err)
4659 return err;
4660
4661 tg3_readphy(tp, MII_BMSR, &bmsr);
4662 for (i = 0; i < 1000; i++) {
4663 udelay(10);
4664 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4665 (bmsr & BMSR_LSTATUS)) {
4666 udelay(40);
4667 break;
4668 }
4669 }
4670
4671 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4672 TG3_PHY_REV_BCM5401_B0 &&
4673 !(bmsr & BMSR_LSTATUS) &&
4674 tp->link_config.active_speed == SPEED_1000) {
4675 err = tg3_phy_reset(tp);
4676 if (!err)
4677 err = tg3_init_5401phy_dsp(tp);
4678 if (err)
4679 return err;
4680 }
4681 }
4682 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4683 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4684 /* 5701 {A0,B0} CRC bug workaround */
4685 tg3_writephy(tp, 0x15, 0x0a75);
4686 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4687 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4688 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4689 }
4690
4691 /* Clear pending interrupts... */
4692 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4693 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4694
4695 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4696 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4697 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4698 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4699
4700 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4701 tg3_asic_rev(tp) == ASIC_REV_5701) {
4702 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4703 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4704 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4705 else
4706 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4707 }
4708
4709 current_link_up = false;
4710 current_speed = SPEED_UNKNOWN;
4711 current_duplex = DUPLEX_UNKNOWN;
4712 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4713 tp->link_config.rmt_adv = 0;
4714
4715 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4716 err = tg3_phy_auxctl_read(tp,
4717 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4718 &val);
4719 if (!err && !(val & (1 << 10))) {
4720 tg3_phy_auxctl_write(tp,
4721 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4722 val | (1 << 10));
4723 goto relink;
4724 }
4725 }
4726
4727 bmsr = 0;
4728 for (i = 0; i < 100; i++) {
4729 tg3_readphy(tp, MII_BMSR, &bmsr);
4730 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4731 (bmsr & BMSR_LSTATUS))
4732 break;
4733 udelay(40);
4734 }
4735
4736 if (bmsr & BMSR_LSTATUS) {
4737 u32 aux_stat, bmcr;
4738
4739 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4740 for (i = 0; i < 2000; i++) {
4741 udelay(10);
4742 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4743 aux_stat)
4744 break;
4745 }
4746
4747 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4748 &current_speed,
4749 &current_duplex);
4750
4751 bmcr = 0;
4752 for (i = 0; i < 200; i++) {
4753 tg3_readphy(tp, MII_BMCR, &bmcr);
4754 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4755 continue;
4756 if (bmcr && bmcr != 0x7fff)
4757 break;
4758 udelay(10);
4759 }
4760
4761 lcl_adv = 0;
4762 rmt_adv = 0;
4763
4764 tp->link_config.active_speed = current_speed;
4765 tp->link_config.active_duplex = current_duplex;
4766
4767 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4768 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4769
4770 if ((bmcr & BMCR_ANENABLE) &&
4771 eee_config_ok &&
4772 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4773 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4774 current_link_up = true;
4775
4776 /* Changes to EEE settings take effect only after a PHY
4777 * reset. If we have skipped a reset because Link Flap
4778 * Avoidance is enabled, do it now.
4779 */
4780 if (!eee_config_ok &&
4781 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4782 !force_reset)
4783 tg3_phy_reset(tp);
4784 } else {
4785 if (!(bmcr & BMCR_ANENABLE) &&
4786 tp->link_config.speed == current_speed &&
4787 tp->link_config.duplex == current_duplex) {
4788 current_link_up = true;
4789 }
4790 }
4791
4792 if (current_link_up &&
4793 tp->link_config.active_duplex == DUPLEX_FULL) {
4794 u32 reg, bit;
4795
4796 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4797 reg = MII_TG3_FET_GEN_STAT;
4798 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4799 } else {
4800 reg = MII_TG3_EXT_STAT;
4801 bit = MII_TG3_EXT_STAT_MDIX;
4802 }
4803
4804 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4805 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4806
4807 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4808 }
4809 }
4810
4811 relink:
4812 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4813 tg3_phy_copper_begin(tp);
4814
4815 if (tg3_flag(tp, ROBOSWITCH)) {
4816 current_link_up = true;
4817 /* FIXME: when a BCM5325 switch is used, use 100 Mbit/s */
4818 current_speed = SPEED_1000;
4819 current_duplex = DUPLEX_FULL;
4820 tp->link_config.active_speed = current_speed;
4821 tp->link_config.active_duplex = current_duplex;
4822 }
4823
4824 tg3_readphy(tp, MII_BMSR, &bmsr);
4825 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4826 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4827 current_link_up = true;
4828 }
4829
4830 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4831 if (current_link_up) {
4832 if (tp->link_config.active_speed == SPEED_100 ||
4833 tp->link_config.active_speed == SPEED_10)
4834 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4835 else
4836 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4837 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4838 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4839 else
4840 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4841
4842 /* In order for the 5750 core in the BCM4785 chip to work properly
4843 * in RGMII mode, the LED Control Register must be set up.
4844 */
4845 if (tg3_flag(tp, RGMII_MODE)) {
4846 u32 led_ctrl = tr32(MAC_LED_CTRL);
4847 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4848
4849 if (tp->link_config.active_speed == SPEED_10)
4850 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4851 else if (tp->link_config.active_speed == SPEED_100)
4852 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4853 LED_CTRL_100MBPS_ON);
4854 else if (tp->link_config.active_speed == SPEED_1000)
4855 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4856 LED_CTRL_1000MBPS_ON);
4857
4858 tw32(MAC_LED_CTRL, led_ctrl);
4859 udelay(40);
4860 }
4861
4862 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4863 if (tp->link_config.active_duplex == DUPLEX_HALF)
4864 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4865
4866 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4867 if (current_link_up &&
4868 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4869 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4870 else
4871 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4872 }
4873
4874 /* ??? Without this setting Netgear GA302T PHY does not
4875 * ??? send/receive packets...
4876 */
4877 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4878 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
4879 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4880 tw32_f(MAC_MI_MODE, tp->mi_mode);
4881 udelay(80);
4882 }
4883
4884 tw32_f(MAC_MODE, tp->mac_mode);
4885 udelay(40);
4886
4887 tg3_phy_eee_adjust(tp, current_link_up);
4888
4889 if (tg3_flag(tp, USE_LINKCHG_REG)) {
4890 /* Polled via timer. */
4891 tw32_f(MAC_EVENT, 0);
4892 } else {
4893 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4894 }
4895 udelay(40);
4896
4897 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
4898 current_link_up &&
4899 tp->link_config.active_speed == SPEED_1000 &&
4900 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4901 udelay(120);
4902 tw32_f(MAC_STATUS,
4903 (MAC_STATUS_SYNC_CHANGED |
4904 MAC_STATUS_CFG_CHANGED));
4905 udelay(40);
4906 tg3_write_mem(tp,
4907 NIC_SRAM_FIRMWARE_MBOX,
4908 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4909 }
4910
4911 /* Prevent send BD corruption. */
4912 if (tg3_flag(tp, CLKREQ_BUG)) {
4913 if (tp->link_config.active_speed == SPEED_100 ||
4914 tp->link_config.active_speed == SPEED_10)
4915 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4916 PCI_EXP_LNKCTL_CLKREQ_EN);
4917 else
4918 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4919 PCI_EXP_LNKCTL_CLKREQ_EN);
4920 }
4921
4922 tg3_test_and_report_link_chg(tp, current_link_up);
4923
4924 return 0;
4925 }
4926
4927 struct tg3_fiber_aneginfo {
4928 int state;
4929 #define ANEG_STATE_UNKNOWN 0
4930 #define ANEG_STATE_AN_ENABLE 1
4931 #define ANEG_STATE_RESTART_INIT 2
4932 #define ANEG_STATE_RESTART 3
4933 #define ANEG_STATE_DISABLE_LINK_OK 4
4934 #define ANEG_STATE_ABILITY_DETECT_INIT 5
4935 #define ANEG_STATE_ABILITY_DETECT 6
4936 #define ANEG_STATE_ACK_DETECT_INIT 7
4937 #define ANEG_STATE_ACK_DETECT 8
4938 #define ANEG_STATE_COMPLETE_ACK_INIT 9
4939 #define ANEG_STATE_COMPLETE_ACK 10
4940 #define ANEG_STATE_IDLE_DETECT_INIT 11
4941 #define ANEG_STATE_IDLE_DETECT 12
4942 #define ANEG_STATE_LINK_OK 13
4943 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
4944 #define ANEG_STATE_NEXT_PAGE_WAIT 15
4945
4946 u32 flags;
4947 #define MR_AN_ENABLE 0x00000001
4948 #define MR_RESTART_AN 0x00000002
4949 #define MR_AN_COMPLETE 0x00000004
4950 #define MR_PAGE_RX 0x00000008
4951 #define MR_NP_LOADED 0x00000010
4952 #define MR_TOGGLE_TX 0x00000020
4953 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
4954 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
4955 #define MR_LP_ADV_SYM_PAUSE 0x00000100
4956 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
4957 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4958 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4959 #define MR_LP_ADV_NEXT_PAGE 0x00001000
4960 #define MR_TOGGLE_RX 0x00002000
4961 #define MR_NP_RX 0x00004000
4962
4963 #define MR_LINK_OK 0x80000000
4964
4965 unsigned long link_time, cur_time;
4966
4967 u32 ability_match_cfg;
4968 int ability_match_count;
4969
4970 char ability_match, idle_match, ack_match;
4971
4972 u32 txconfig, rxconfig;
4973 #define ANEG_CFG_NP 0x00000080
4974 #define ANEG_CFG_ACK 0x00000040
4975 #define ANEG_CFG_RF2 0x00000020
4976 #define ANEG_CFG_RF1 0x00000010
4977 #define ANEG_CFG_PS2 0x00000001
4978 #define ANEG_CFG_PS1 0x00008000
4979 #define ANEG_CFG_HD 0x00004000
4980 #define ANEG_CFG_FD 0x00002000
4981 #define ANEG_CFG_INVAL 0x00001f06
4982
4983 };
4984 #define ANEG_OK 0
4985 #define ANEG_DONE 1
4986 #define ANEG_TIMER_ENAB 2
4987 #define ANEG_FAILED -1
4988
4989 #define ANEG_STATE_SETTLE_TIME 10000
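
/* Rough flow of the software autoneg state machine below (roughly the
 * IEEE 802.3 clause 37 procedure): AN_ENABLE -> RESTART ->
 * ABILITY_DETECT (transmit our config word until the partner's word
 * is seen stably) -> ACK_DETECT (both sides set the ACK bit) ->
 * COMPLETE_ACK (latch the partner's advertised abilities into flags)
 * -> IDLE_DETECT -> LINK_OK. ANEG_STATE_SETTLE_TIME is measured in
 * the caller's polling ticks (about 1 us each in fiber_autoneg), not
 * directly in microseconds.
 */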
4990
4991 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4992 struct tg3_fiber_aneginfo *ap)
4993 {
4994 u16 flowctrl;
4995 unsigned long delta;
4996 u32 rx_cfg_reg;
4997 int ret;
4998
4999 if (ap->state == ANEG_STATE_UNKNOWN) {
5000 ap->rxconfig = 0;
5001 ap->link_time = 0;
5002 ap->cur_time = 0;
5003 ap->ability_match_cfg = 0;
5004 ap->ability_match_count = 0;
5005 ap->ability_match = 0;
5006 ap->idle_match = 0;
5007 ap->ack_match = 0;
5008 }
5009 ap->cur_time++;
5010
5011 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5012 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5013
5014 if (rx_cfg_reg != ap->ability_match_cfg) {
5015 ap->ability_match_cfg = rx_cfg_reg;
5016 ap->ability_match = 0;
5017 ap->ability_match_count = 0;
5018 } else {
5019 if (++ap->ability_match_count > 1) {
5020 ap->ability_match = 1;
5021 ap->ability_match_cfg = rx_cfg_reg;
5022 }
5023 }
5024 if (rx_cfg_reg & ANEG_CFG_ACK)
5025 ap->ack_match = 1;
5026 else
5027 ap->ack_match = 0;
5028
5029 ap->idle_match = 0;
5030 } else {
5031 ap->idle_match = 1;
5032 ap->ability_match_cfg = 0;
5033 ap->ability_match_count = 0;
5034 ap->ability_match = 0;
5035 ap->ack_match = 0;
5036
5037 rx_cfg_reg = 0;
5038 }
5039
5040 ap->rxconfig = rx_cfg_reg;
5041 ret = ANEG_OK;
5042
5043 switch (ap->state) {
5044 case ANEG_STATE_UNKNOWN:
5045 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5046 ap->state = ANEG_STATE_AN_ENABLE;
5047
5048 /* fallthru */
5049 case ANEG_STATE_AN_ENABLE:
5050 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5051 if (ap->flags & MR_AN_ENABLE) {
5052 ap->link_time = 0;
5053 ap->cur_time = 0;
5054 ap->ability_match_cfg = 0;
5055 ap->ability_match_count = 0;
5056 ap->ability_match = 0;
5057 ap->idle_match = 0;
5058 ap->ack_match = 0;
5059
5060 ap->state = ANEG_STATE_RESTART_INIT;
5061 } else {
5062 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5063 }
5064 break;
5065
5066 case ANEG_STATE_RESTART_INIT:
5067 ap->link_time = ap->cur_time;
5068 ap->flags &= ~(MR_NP_LOADED);
5069 ap->txconfig = 0;
5070 tw32(MAC_TX_AUTO_NEG, 0);
5071 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5072 tw32_f(MAC_MODE, tp->mac_mode);
5073 udelay(40);
5074
5075 ret = ANEG_TIMER_ENAB;
5076 ap->state = ANEG_STATE_RESTART;
5077
5078 /* fallthru */
5079 case ANEG_STATE_RESTART:
5080 delta = ap->cur_time - ap->link_time;
5081 if (delta > ANEG_STATE_SETTLE_TIME)
5082 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5083 else
5084 ret = ANEG_TIMER_ENAB;
5085 break;
5086
5087 case ANEG_STATE_DISABLE_LINK_OK:
5088 ret = ANEG_DONE;
5089 break;
5090
5091 case ANEG_STATE_ABILITY_DETECT_INIT:
5092 ap->flags &= ~(MR_TOGGLE_TX);
5093 ap->txconfig = ANEG_CFG_FD;
5094 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5095 if (flowctrl & ADVERTISE_1000XPAUSE)
5096 ap->txconfig |= ANEG_CFG_PS1;
5097 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5098 ap->txconfig |= ANEG_CFG_PS2;
5099 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5100 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5101 tw32_f(MAC_MODE, tp->mac_mode);
5102 udelay(40);
5103
5104 ap->state = ANEG_STATE_ABILITY_DETECT;
5105 break;
5106
5107 case ANEG_STATE_ABILITY_DETECT:
5108 if (ap->ability_match != 0 && ap->rxconfig != 0)
5109 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5110 break;
5111
5112 case ANEG_STATE_ACK_DETECT_INIT:
5113 ap->txconfig |= ANEG_CFG_ACK;
5114 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5115 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5116 tw32_f(MAC_MODE, tp->mac_mode);
5117 udelay(40);
5118
5119 ap->state = ANEG_STATE_ACK_DETECT;
5120
5121 /* fallthru */
5122 case ANEG_STATE_ACK_DETECT:
5123 if (ap->ack_match != 0) {
5124 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5125 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5126 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5127 } else {
5128 ap->state = ANEG_STATE_AN_ENABLE;
5129 }
5130 } else if (ap->ability_match != 0 &&
5131 ap->rxconfig == 0) {
5132 ap->state = ANEG_STATE_AN_ENABLE;
5133 }
5134 break;
5135
5136 case ANEG_STATE_COMPLETE_ACK_INIT:
5137 if (ap->rxconfig & ANEG_CFG_INVAL) {
5138 ret = ANEG_FAILED;
5139 break;
5140 }
5141 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5142 MR_LP_ADV_HALF_DUPLEX |
5143 MR_LP_ADV_SYM_PAUSE |
5144 MR_LP_ADV_ASYM_PAUSE |
5145 MR_LP_ADV_REMOTE_FAULT1 |
5146 MR_LP_ADV_REMOTE_FAULT2 |
5147 MR_LP_ADV_NEXT_PAGE |
5148 MR_TOGGLE_RX |
5149 MR_NP_RX);
5150 if (ap->rxconfig & ANEG_CFG_FD)
5151 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5152 if (ap->rxconfig & ANEG_CFG_HD)
5153 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5154 if (ap->rxconfig & ANEG_CFG_PS1)
5155 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5156 if (ap->rxconfig & ANEG_CFG_PS2)
5157 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5158 if (ap->rxconfig & ANEG_CFG_RF1)
5159 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5160 if (ap->rxconfig & ANEG_CFG_RF2)
5161 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5162 if (ap->rxconfig & ANEG_CFG_NP)
5163 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5164
5165 ap->link_time = ap->cur_time;
5166
5167 ap->flags ^= (MR_TOGGLE_TX);
5168 if (ap->rxconfig & 0x0008)
5169 ap->flags |= MR_TOGGLE_RX;
5170 if (ap->rxconfig & ANEG_CFG_NP)
5171 ap->flags |= MR_NP_RX;
5172 ap->flags |= MR_PAGE_RX;
5173
5174 ap->state = ANEG_STATE_COMPLETE_ACK;
5175 ret = ANEG_TIMER_ENAB;
5176 break;
5177
5178 case ANEG_STATE_COMPLETE_ACK:
5179 if (ap->ability_match != 0 &&
5180 ap->rxconfig == 0) {
5181 ap->state = ANEG_STATE_AN_ENABLE;
5182 break;
5183 }
5184 delta = ap->cur_time - ap->link_time;
5185 if (delta > ANEG_STATE_SETTLE_TIME) {
5186 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5187 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5188 } else {
5189 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5190 !(ap->flags & MR_NP_RX)) {
5191 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5192 } else {
5193 ret = ANEG_FAILED;
5194 }
5195 }
5196 }
5197 break;
5198
5199 case ANEG_STATE_IDLE_DETECT_INIT:
5200 ap->link_time = ap->cur_time;
5201 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5202 tw32_f(MAC_MODE, tp->mac_mode);
5203 udelay(40);
5204
5205 ap->state = ANEG_STATE_IDLE_DETECT;
5206 ret = ANEG_TIMER_ENAB;
5207 break;
5208
5209 case ANEG_STATE_IDLE_DETECT:
5210 if (ap->ability_match != 0 &&
5211 ap->rxconfig == 0) {
5212 ap->state = ANEG_STATE_AN_ENABLE;
5213 break;
5214 }
5215 delta = ap->cur_time - ap->link_time;
5216 if (delta > ANEG_STATE_SETTLE_TIME) {
5217 /* XXX another gem from the Broadcom driver :( */
5218 ap->state = ANEG_STATE_LINK_OK;
5219 }
5220 break;
5221
5222 case ANEG_STATE_LINK_OK:
5223 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5224 ret = ANEG_DONE;
5225 break;
5226
5227 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5228 /* ??? unimplemented */
5229 break;
5230
5231 case ANEG_STATE_NEXT_PAGE_WAIT:
5232 /* ??? unimplemented */
5233 break;
5234
5235 default:
5236 ret = ANEG_FAILED;
5237 break;
5238 }
5239
5240 return ret;
5241 }
5242
5243 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5244 {
5245 int res = 0;
5246 struct tg3_fiber_aneginfo aninfo;
5247 int status = ANEG_FAILED;
5248 unsigned int tick;
5249 u32 tmp;
5250
5251 tw32_f(MAC_TX_AUTO_NEG, 0);
5252
5253 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5254 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5255 udelay(40);
5256
5257 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5258 udelay(40);
5259
5260 memset(&aninfo, 0, sizeof(aninfo));
5261 aninfo.flags |= MR_AN_ENABLE;
5262 aninfo.state = ANEG_STATE_UNKNOWN;
5263 aninfo.cur_time = 0;
5264 tick = 0;
5265 while (++tick < 195000) {
5266 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5267 if (status == ANEG_DONE || status == ANEG_FAILED)
5268 break;
5269
5270 udelay(1);
5271 }
5272
5273 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5274 tw32_f(MAC_MODE, tp->mac_mode);
5275 udelay(40);
5276
5277 *txflags = aninfo.txconfig;
5278 *rxflags = aninfo.flags;
5279
5280 if (status == ANEG_DONE &&
5281 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5282 MR_LP_ADV_FULL_DUPLEX)))
5283 res = 1;
5284
5285 return res;
5286 }
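
/* Timing sketch: the loop above steps the state machine once per
 * iteration with a udelay(1) in between, so the whole handshake gets
 * a busy-wait budget of roughly 195000 us (~195 ms), and each
 * ANEG_STATE_SETTLE_TIME window inside it corresponds to about 10 ms
 * of that budget.
 */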
5287
5288 static void tg3_init_bcm8002(struct tg3 *tp)
5289 {
5290 u32 mac_status = tr32(MAC_STATUS);
5291 int i;
5292
5293 /* Reset when initializing for the first time, or when we have a link. */
5294 if (tg3_flag(tp, INIT_COMPLETE) &&
5295 !(mac_status & MAC_STATUS_PCS_SYNCED))
5296 return;
5297
5298 /* Set PLL lock range. */
5299 tg3_writephy(tp, 0x16, 0x8007);
5300
5301 /* SW reset */
5302 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5303
5304 /* Wait for reset to complete. */
5305 /* XXX schedule_timeout() ... */
5306 for (i = 0; i < 500; i++)
5307 udelay(10);
5308
5309 /* Config mode; select PMA/Ch 1 regs. */
5310 tg3_writephy(tp, 0x10, 0x8411);
5311
5312 /* Enable auto-lock and comdet, select txclk for tx. */
5313 tg3_writephy(tp, 0x11, 0x0a10);
5314
5315 tg3_writephy(tp, 0x18, 0x00a0);
5316 tg3_writephy(tp, 0x16, 0x41ff);
5317
5318 /* Assert and deassert POR. */
5319 tg3_writephy(tp, 0x13, 0x0400);
5320 udelay(40);
5321 tg3_writephy(tp, 0x13, 0x0000);
5322
5323 tg3_writephy(tp, 0x11, 0x0a50);
5324 udelay(40);
5325 tg3_writephy(tp, 0x11, 0x0a10);
5326
5327 /* Wait for signal to stabilize */
5328 /* XXX schedule_timeout() ... */
5329 for (i = 0; i < 15000; i++)
5330 udelay(10);
5331
5332 /* Deselect the channel register so we can read the PHYID
5333 * later.
5334 */
5335 tg3_writephy(tp, 0x10, 0x8011);
5336 }
5337
5338 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5339 {
5340 u16 flowctrl;
5341 bool current_link_up;
5342 u32 sg_dig_ctrl, sg_dig_status;
5343 u32 serdes_cfg, expected_sg_dig_ctrl;
5344 int workaround, port_a;
5345
5346 serdes_cfg = 0;
5347 expected_sg_dig_ctrl = 0;
5348 workaround = 0;
5349 port_a = 1;
5350 current_link_up = false;
5351
5352 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5353 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5354 workaround = 1;
5355 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5356 port_a = 0;
5357
5358 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5359 /* preserve bits 20-23 for voltage regulator */
5360 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5361 }
5362
5363 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5364
5365 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5366 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5367 if (workaround) {
5368 u32 val = serdes_cfg;
5369
5370 if (port_a)
5371 val |= 0xc010000;
5372 else
5373 val |= 0x4010000;
5374 tw32_f(MAC_SERDES_CFG, val);
5375 }
5376
5377 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5378 }
5379 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5380 tg3_setup_flow_control(tp, 0, 0);
5381 current_link_up = true;
5382 }
5383 goto out;
5384 }
5385
5386 /* Want auto-negotiation. */
5387 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5388
5389 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5390 if (flowctrl & ADVERTISE_1000XPAUSE)
5391 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5392 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5393 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5394
5395 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5396 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5397 tp->serdes_counter &&
5398 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5399 MAC_STATUS_RCVD_CFG)) ==
5400 MAC_STATUS_PCS_SYNCED)) {
5401 tp->serdes_counter--;
5402 current_link_up = true;
5403 goto out;
5404 }
5405 restart_autoneg:
5406 if (workaround)
5407 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5408 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5409 udelay(5);
5410 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5411
5412 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5413 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5414 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5415 MAC_STATUS_SIGNAL_DET)) {
5416 sg_dig_status = tr32(SG_DIG_STATUS);
5417 mac_status = tr32(MAC_STATUS);
5418
5419 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5420 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5421 u32 local_adv = 0, remote_adv = 0;
5422
5423 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5424 local_adv |= ADVERTISE_1000XPAUSE;
5425 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5426 local_adv |= ADVERTISE_1000XPSE_ASYM;
5427
5428 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5429 remote_adv |= LPA_1000XPAUSE;
5430 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5431 remote_adv |= LPA_1000XPAUSE_ASYM;
5432
5433 tp->link_config.rmt_adv =
5434 mii_adv_to_ethtool_adv_x(remote_adv);
5435
5436 tg3_setup_flow_control(tp, local_adv, remote_adv);
5437 current_link_up = true;
5438 tp->serdes_counter = 0;
5439 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5440 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5441 if (tp->serdes_counter)
5442 tp->serdes_counter--;
5443 else {
5444 if (workaround) {
5445 u32 val = serdes_cfg;
5446
5447 if (port_a)
5448 val |= 0xc010000;
5449 else
5450 val |= 0x4010000;
5451
5452 tw32_f(MAC_SERDES_CFG, val);
5453 }
5454
5455 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5456 udelay(40);
5457
5458 /* Link parallel detection: the link is up only
5459 * if we have PCS_SYNC and are not receiving
5460 * config code words. */
5461 mac_status = tr32(MAC_STATUS);
5462 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5463 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5464 tg3_setup_flow_control(tp, 0, 0);
5465 current_link_up = true;
5466 tp->phy_flags |=
5467 TG3_PHYFLG_PARALLEL_DETECT;
5468 tp->serdes_counter =
5469 SERDES_PARALLEL_DET_TIMEOUT;
5470 } else
5471 goto restart_autoneg;
5472 }
5473 }
5474 } else {
5475 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5476 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5477 }
5478
5479 out:
5480 return current_link_up;
5481 }
5482
5483 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5484 {
5485 bool current_link_up = false;
5486
5487 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5488 goto out;
5489
5490 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5491 u32 txflags, rxflags;
5492 int i;
5493
5494 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5495 u32 local_adv = 0, remote_adv = 0;
5496
5497 if (txflags & ANEG_CFG_PS1)
5498 local_adv |= ADVERTISE_1000XPAUSE;
5499 if (txflags & ANEG_CFG_PS2)
5500 local_adv |= ADVERTISE_1000XPSE_ASYM;
5501
5502 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5503 remote_adv |= LPA_1000XPAUSE;
5504 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5505 remote_adv |= LPA_1000XPAUSE_ASYM;
5506
5507 tp->link_config.rmt_adv =
5508 mii_adv_to_ethtool_adv_x(remote_adv);
5509
5510 tg3_setup_flow_control(tp, local_adv, remote_adv);
5511
5512 current_link_up = true;
5513 }
5514 for (i = 0; i < 30; i++) {
5515 udelay(20);
5516 tw32_f(MAC_STATUS,
5517 (MAC_STATUS_SYNC_CHANGED |
5518 MAC_STATUS_CFG_CHANGED));
5519 udelay(40);
5520 if ((tr32(MAC_STATUS) &
5521 (MAC_STATUS_SYNC_CHANGED |
5522 MAC_STATUS_CFG_CHANGED)) == 0)
5523 break;
5524 }
5525
5526 mac_status = tr32(MAC_STATUS);
5527 if (!current_link_up &&
5528 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5529 !(mac_status & MAC_STATUS_RCVD_CFG))
5530 current_link_up = true;
5531 } else {
5532 tg3_setup_flow_control(tp, 0, 0);
5533
5534 /* Forcing 1000FD link up. */
5535 current_link_up = true;
5536
5537 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5538 udelay(40);
5539
5540 tw32_f(MAC_MODE, tp->mac_mode);
5541 udelay(40);
5542 }
5543
5544 out:
5545 return current_link_up;
5546 }
5547
5548 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5549 {
5550 u32 orig_pause_cfg;
5551 u16 orig_active_speed;
5552 u8 orig_active_duplex;
5553 u32 mac_status;
5554 bool current_link_up;
5555 int i;
5556
5557 orig_pause_cfg = tp->link_config.active_flowctrl;
5558 orig_active_speed = tp->link_config.active_speed;
5559 orig_active_duplex = tp->link_config.active_duplex;
5560
5561 if (!tg3_flag(tp, HW_AUTONEG) &&
5562 tp->link_up &&
5563 tg3_flag(tp, INIT_COMPLETE)) {
5564 mac_status = tr32(MAC_STATUS);
5565 mac_status &= (MAC_STATUS_PCS_SYNCED |
5566 MAC_STATUS_SIGNAL_DET |
5567 MAC_STATUS_CFG_CHANGED |
5568 MAC_STATUS_RCVD_CFG);
5569 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5570 MAC_STATUS_SIGNAL_DET)) {
5571 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5572 MAC_STATUS_CFG_CHANGED));
5573 return 0;
5574 }
5575 }
5576
5577 tw32_f(MAC_TX_AUTO_NEG, 0);
5578
5579 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5580 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5581 tw32_f(MAC_MODE, tp->mac_mode);
5582 udelay(40);
5583
5584 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5585 tg3_init_bcm8002(tp);
5586
5587 /* Enable the link change event even when polling the serdes. */
5588 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5589 udelay(40);
5590
5591 current_link_up = false;
5592 tp->link_config.rmt_adv = 0;
5593 mac_status = tr32(MAC_STATUS);
5594
5595 if (tg3_flag(tp, HW_AUTONEG))
5596 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5597 else
5598 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5599
5600 tp->napi[0].hw_status->status =
5601 (SD_STATUS_UPDATED |
5602 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5603
5604 for (i = 0; i < 100; i++) {
5605 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5606 MAC_STATUS_CFG_CHANGED));
5607 udelay(5);
5608 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5609 MAC_STATUS_CFG_CHANGED |
5610 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5611 break;
5612 }
5613
5614 mac_status = tr32(MAC_STATUS);
5615 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5616 current_link_up = false;
5617 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5618 tp->serdes_counter == 0) {
5619 tw32_f(MAC_MODE, (tp->mac_mode |
5620 MAC_MODE_SEND_CONFIGS));
5621 udelay(1);
5622 tw32_f(MAC_MODE, tp->mac_mode);
5623 }
5624 }
5625
5626 if (current_link_up) {
5627 tp->link_config.active_speed = SPEED_1000;
5628 tp->link_config.active_duplex = DUPLEX_FULL;
5629 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5630 LED_CTRL_LNKLED_OVERRIDE |
5631 LED_CTRL_1000MBPS_ON));
5632 } else {
5633 tp->link_config.active_speed = SPEED_UNKNOWN;
5634 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5635 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5636 LED_CTRL_LNKLED_OVERRIDE |
5637 LED_CTRL_TRAFFIC_OVERRIDE));
5638 }
5639
5640 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5641 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5642 if (orig_pause_cfg != now_pause_cfg ||
5643 orig_active_speed != tp->link_config.active_speed ||
5644 orig_active_duplex != tp->link_config.active_duplex)
5645 tg3_link_report(tp);
5646 }
5647
5648 return 0;
5649 }
5650
5651 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5652 {
5653 int err = 0;
5654 u32 bmsr, bmcr;
5655 u16 current_speed = SPEED_UNKNOWN;
5656 u8 current_duplex = DUPLEX_UNKNOWN;
5657 bool current_link_up = false;
5658 u32 local_adv, remote_adv, sgsr;
5659
5660 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5661 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5662 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5663 (sgsr & SERDES_TG3_SGMII_MODE)) {
5664
5665 if (force_reset)
5666 tg3_phy_reset(tp);
5667
5668 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5669
5670 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5671 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5672 } else {
5673 current_link_up = true;
5674 if (sgsr & SERDES_TG3_SPEED_1000) {
5675 current_speed = SPEED_1000;
5676 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5677 } else if (sgsr & SERDES_TG3_SPEED_100) {
5678 current_speed = SPEED_100;
5679 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5680 } else {
5681 current_speed = SPEED_10;
5682 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5683 }
5684
5685 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5686 current_duplex = DUPLEX_FULL;
5687 else
5688 current_duplex = DUPLEX_HALF;
5689 }
5690
5691 tw32_f(MAC_MODE, tp->mac_mode);
5692 udelay(40);
5693
5694 tg3_clear_mac_status(tp);
5695
5696 goto fiber_setup_done;
5697 }
5698
5699 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5700 tw32_f(MAC_MODE, tp->mac_mode);
5701 udelay(40);
5702
5703 tg3_clear_mac_status(tp);
5704
5705 if (force_reset)
5706 tg3_phy_reset(tp);
5707
5708 tp->link_config.rmt_adv = 0;
5709
5710 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5711 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5712 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5713 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5714 bmsr |= BMSR_LSTATUS;
5715 else
5716 bmsr &= ~BMSR_LSTATUS;
5717 }
5718
5719 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5720
5721 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5722 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5723 /* do nothing, just check for link up at the end */
5724 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5725 u32 adv, newadv;
5726
5727 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5728 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5729 ADVERTISE_1000XPAUSE |
5730 ADVERTISE_1000XPSE_ASYM |
5731 ADVERTISE_SLCT);
5732
5733 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5734 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5735
5736 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5737 tg3_writephy(tp, MII_ADVERTISE, newadv);
5738 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5739 tg3_writephy(tp, MII_BMCR, bmcr);
5740
5741 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5742 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5743 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5744
5745 return err;
5746 }
5747 } else {
5748 u32 new_bmcr;
5749
5750 bmcr &= ~BMCR_SPEED1000;
5751 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5752
5753 if (tp->link_config.duplex == DUPLEX_FULL)
5754 new_bmcr |= BMCR_FULLDPLX;
5755
5756 if (new_bmcr != bmcr) {
5757 /* BMCR_SPEED1000 is a reserved bit that needs
5758 * to be set on write.
5759 */
5760 new_bmcr |= BMCR_SPEED1000;
5761
5762 /* Force a linkdown */
5763 if (tp->link_up) {
5764 u32 adv;
5765
5766 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5767 adv &= ~(ADVERTISE_1000XFULL |
5768 ADVERTISE_1000XHALF |
5769 ADVERTISE_SLCT);
5770 tg3_writephy(tp, MII_ADVERTISE, adv);
5771 tg3_writephy(tp, MII_BMCR, bmcr |
5772 BMCR_ANRESTART |
5773 BMCR_ANENABLE);
5774 udelay(10);
5775 tg3_carrier_off(tp);
5776 }
5777 tg3_writephy(tp, MII_BMCR, new_bmcr);
5778 bmcr = new_bmcr;
5779 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5780 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5781 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5782 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5783 bmsr |= BMSR_LSTATUS;
5784 else
5785 bmsr &= ~BMSR_LSTATUS;
5786 }
5787 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5788 }
5789 }
5790
5791 if (bmsr & BMSR_LSTATUS) {
5792 current_speed = SPEED_1000;
5793 current_link_up = true;
5794 if (bmcr & BMCR_FULLDPLX)
5795 current_duplex = DUPLEX_FULL;
5796 else
5797 current_duplex = DUPLEX_HALF;
5798
5799 local_adv = 0;
5800 remote_adv = 0;
5801
5802 if (bmcr & BMCR_ANENABLE) {
5803 u32 common;
5804
5805 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5806 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5807 common = local_adv & remote_adv;
5808 if (common & (ADVERTISE_1000XHALF |
5809 ADVERTISE_1000XFULL)) {
5810 if (common & ADVERTISE_1000XFULL)
5811 current_duplex = DUPLEX_FULL;
5812 else
5813 current_duplex = DUPLEX_HALF;
5814
5815 tp->link_config.rmt_adv =
5816 mii_adv_to_ethtool_adv_x(remote_adv);
5817 } else if (!tg3_flag(tp, 5780_CLASS)) {
5818 /* Link is up via parallel detect */
5819 } else {
5820 current_link_up = false;
5821 }
5822 }
5823 }
5824
5825 fiber_setup_done:
5826 if (current_link_up && current_duplex == DUPLEX_FULL)
5827 tg3_setup_flow_control(tp, local_adv, remote_adv);
5828
5829 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5830 if (tp->link_config.active_duplex == DUPLEX_HALF)
5831 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5832
5833 tw32_f(MAC_MODE, tp->mac_mode);
5834 udelay(40);
5835
5836 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5837
5838 tp->link_config.active_speed = current_speed;
5839 tp->link_config.active_duplex = current_duplex;
5840
5841 tg3_test_and_report_link_chg(tp, current_link_up);
5842 return err;
5843 }
5844
5845 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5846 {
5847 if (tp->serdes_counter) {
5848 /* Give autoneg time to complete. */
5849 tp->serdes_counter--;
5850 return;
5851 }
5852
5853 if (!tp->link_up &&
5854 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5855 u32 bmcr;
5856
5857 tg3_readphy(tp, MII_BMCR, &bmcr);
5858 if (bmcr & BMCR_ANENABLE) {
5859 u32 phy1, phy2;
5860
5861 /* Select shadow register 0x1f */
5862 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5863 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5864
5865 /* Select expansion interrupt status register */
5866 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5867 MII_TG3_DSP_EXP1_INT_STAT);
5868 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5869 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5870
5871 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5872 /* We have signal detect and are not receiving
5873 * config code words; the link is up by parallel
5874 * detection.
5875 */
5876
5877 bmcr &= ~BMCR_ANENABLE;
5878 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5879 tg3_writephy(tp, MII_BMCR, bmcr);
5880 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5881 }
5882 }
5883 } else if (tp->link_up &&
5884 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5885 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5886 u32 phy2;
5887
5888 /* Select expansion interrupt status register */
5889 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5890 MII_TG3_DSP_EXP1_INT_STAT);
5891 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5892 if (phy2 & 0x20) {
5893 u32 bmcr;
5894
5895 /* Config code words received, turn on autoneg. */
5896 tg3_readphy(tp, MII_BMCR, &bmcr);
5897 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5898
5899 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5901 }
5902 }
5903 }
5904
5905 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
5906 {
5907 u32 val;
5908 int err;
5909
5910 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5911 err = tg3_setup_fiber_phy(tp, force_reset);
5912 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5913 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5914 else
5915 err = tg3_setup_copper_phy(tp, force_reset);
5916
5917 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
5918 u32 scale;
5919
5920 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5921 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5922 scale = 65;
5923 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5924 scale = 6;
5925 else
5926 scale = 12;
5927
5928 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5929 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5930 tw32(GRC_MISC_CFG, val);
5931 }
5932
5933 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5934 (6 << TX_LENGTHS_IPG_SHIFT);
5935 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
5936 tg3_asic_rev(tp) == ASIC_REV_5762)
5937 val |= tr32(MAC_TX_LENGTHS) &
5938 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5939 TX_LENGTHS_CNT_DWN_VAL_MSK);
5940
5941 if (tp->link_config.active_speed == SPEED_1000 &&
5942 tp->link_config.active_duplex == DUPLEX_HALF)
5943 tw32(MAC_TX_LENGTHS, val |
5944 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5945 else
5946 tw32(MAC_TX_LENGTHS, val |
5947 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5948
5949 if (!tg3_flag(tp, 5705_PLUS)) {
5950 if (tp->link_up) {
5951 tw32(HOSTCC_STAT_COAL_TICKS,
5952 tp->coal.stats_block_coalesce_usecs);
5953 } else {
5954 tw32(HOSTCC_STAT_COAL_TICKS, 0);
5955 }
5956 }
5957
5958 if (tg3_flag(tp, ASPM_WORKAROUND)) {
5959 val = tr32(PCIE_PWR_MGMT_THRESH);
5960 if (!tp->link_up)
5961 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5962 tp->pwrmgmt_thresh;
5963 else
5964 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5965 tw32(PCIE_PWR_MGMT_THRESH, val);
5966 }
5967
5968 return err;
5969 }
5970
5971 /* tp->lock must be held */
5972 static u64 tg3_refclk_read(struct tg3 *tp)
5973 {
5974 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
5975 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
5976 }
5977
5978 /* tp->lock must be held */
5979 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
5980 {
5981 tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
5982 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
5983 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
5984 tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
5985 }
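
/* Sketch of the 64-bit composition above, with illustrative values:
 * the EAV reference clock is exposed as two 32-bit halves, so for
 *
 *   LSB = 0x9abcdef0, MSB = 0x12345678
 *   stamp = 0x9abcdef0 | ((u64)0x12345678 << 32) = 0x123456789abcdef0
 *
 * The write side stops the counter, loads both halves, then resumes,
 * so the new value takes effect atomically with respect to the
 * running clock.
 */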
5986
5987 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
5988 static inline void tg3_full_unlock(struct tg3 *tp);
5989 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
5990 {
5991 struct tg3 *tp = netdev_priv(dev);
5992
5993 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
5994 SOF_TIMESTAMPING_RX_SOFTWARE |
5995 SOF_TIMESTAMPING_SOFTWARE;
5996
5997 if (tg3_flag(tp, PTP_CAPABLE)) {
5998 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
5999 SOF_TIMESTAMPING_RX_HARDWARE |
6000 SOF_TIMESTAMPING_RAW_HARDWARE;
6001 }
6002
6003 if (tp->ptp_clock)
6004 info->phc_index = ptp_clock_index(tp->ptp_clock);
6005 else
6006 info->phc_index = -1;
6007
6008 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6009
6010 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6011 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6012 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6013 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6014 return 0;
6015 }
6016
6017 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6018 {
6019 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6020 bool neg_adj = false;
6021 u32 correction = 0;
6022
6023 if (ppb < 0) {
6024 neg_adj = true;
6025 ppb = -ppb;
6026 }
6027
6028 /* Frequency adjustment is performed using hardware with a 24 bit
6029 * accumulator and a programmable correction value. On each clk, the
6030 * correction value gets added to the accumulator and when it
6031 * overflows, the time counter is incremented/decremented.
6032 *
6033 * So conversion from ppb to correction value is
6034 * ppb * (1 << 24) / 1000000000
6035 */
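/* Illustrative numbers (not from the original source): a +100 ppm
 * request arrives as ppb = 100000, so
 *	correction = div_u64(100000ULL * (1 << 24), 1000000000) = 1677,
 * nudging the accumulator by 1677/2^24 (~100 ppm) per clock.
 */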
6036 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6037 TG3_EAV_REF_CLK_CORRECT_MASK;
6038
6039 tg3_full_lock(tp, 0);
6040
6041 if (correction)
6042 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6043 TG3_EAV_REF_CLK_CORRECT_EN |
6044 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6045 else
6046 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6047
6048 tg3_full_unlock(tp);
6049
6050 return 0;
6051 }
6052
6053 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6054 {
6055 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6056
6057 tg3_full_lock(tp, 0);
6058 tp->ptp_adjust += delta;
6059 tg3_full_unlock(tp);
6060
6061 return 0;
6062 }
6063
6064 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
6065 {
6066 u64 ns;
6067 u32 remainder;
6068 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6069
6070 tg3_full_lock(tp, 0);
6071 ns = tg3_refclk_read(tp);
6072 ns += tp->ptp_adjust;
6073 tg3_full_unlock(tp);
6074
6075 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
6076 ts->tv_nsec = remainder;
6077
6078 return 0;
6079 }
6080
6081 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6082 const struct timespec *ts)
6083 {
6084 u64 ns;
6085 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6086
6087 ns = timespec_to_ns(ts);
6088
6089 tg3_full_lock(tp, 0);
6090 tg3_refclk_write(tp, ns);
6091 tp->ptp_adjust = 0;
6092 tg3_full_unlock(tp);
6093
6094 return 0;
6095 }
6096
6097 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6098 struct ptp_clock_request *rq, int on)
6099 {
6100 return -EOPNOTSUPP;
6101 }
6102
6103 static const struct ptp_clock_info tg3_ptp_caps = {
6104 .owner = THIS_MODULE,
6105 .name = "tg3 clock",
6106 .max_adj = 250000000,
6107 .n_alarm = 0,
6108 .n_ext_ts = 0,
6109 .n_per_out = 0,
6110 .pps = 0,
6111 .adjfreq = tg3_ptp_adjfreq,
6112 .adjtime = tg3_ptp_adjtime,
6113 .gettime = tg3_ptp_gettime,
6114 .settime = tg3_ptp_settime,
6115 .enable = tg3_ptp_enable,
6116 };
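/* Annotation: tg3_ptp_init() below copies this template into
 * tp->ptp_info. The matching ptp_clock_register() call that fills in
 * tp->ptp_clock is made elsewhere in the driver (not shown in this
 * excerpt); tg3_ptp_fini() only has to unregister it.
 */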
6117
6118 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6119 struct skb_shared_hwtstamps *timestamp)
6120 {
6121 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6122 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6123 tp->ptp_adjust);
6124 }
6125
6126 /* tp->lock must be held */
6127 static void tg3_ptp_init(struct tg3 *tp)
6128 {
6129 if (!tg3_flag(tp, PTP_CAPABLE))
6130 return;
6131
6132 /* Initialize the hardware clock to the system time. */
6133 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6134 tp->ptp_adjust = 0;
6135 tp->ptp_info = tg3_ptp_caps;
6136 }
6137
6138 /* tp->lock must be held */
6139 static void tg3_ptp_resume(struct tg3 *tp)
6140 {
6141 if (!tg3_flag(tp, PTP_CAPABLE))
6142 return;
6143
6144 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6145 tp->ptp_adjust = 0;
6146 }
6147
6148 static void tg3_ptp_fini(struct tg3 *tp)
6149 {
6150 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6151 return;
6152
6153 ptp_clock_unregister(tp->ptp_clock);
6154 tp->ptp_clock = NULL;
6155 tp->ptp_adjust = 0;
6156 }
6157
6158 static inline int tg3_irq_sync(struct tg3 *tp)
6159 {
6160 return tp->irq_sync;
6161 }
6162
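/* Annotation: copy a block of chip registers into the dump buffer,
 * placing each register at the same offset it occupies in register
 * space (dst is biased by off before the loop, so regs[off/4] holds
 * the register at offset off).
 */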
6163 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6164 {
6165 int i;
6166
6167 dst = (u32 *)((u8 *)dst + off);
6168 for (i = 0; i < len; i += sizeof(u32))
6169 *dst++ = tr32(off + i);
6170 }
6171
6172 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6173 {
6174 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6175 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6176 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6177 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6178 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6179 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6180 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6181 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6182 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6183 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6184 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6185 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6186 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6187 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6188 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6189 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6190 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6191 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6192 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6193
6194 if (tg3_flag(tp, SUPPORT_MSIX))
6195 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6196
6197 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6198 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6199 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6200 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6201 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6202 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6203 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6204 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6205
6206 if (!tg3_flag(tp, 5705_PLUS)) {
6207 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6208 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6209 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6210 }
6211
6212 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6213 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6214 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6215 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6216 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6217
6218 if (tg3_flag(tp, NVRAM))
6219 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6220 }
6221
6222 static void tg3_dump_state(struct tg3 *tp)
6223 {
6224 int i;
6225 u32 *regs;
6226
6227 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6228 if (!regs)
6229 return;
6230
6231 if (tg3_flag(tp, PCI_EXPRESS)) {
6232 /* Read up to but not including private PCI registers */
6233 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6234 regs[i / sizeof(u32)] = tr32(i);
6235 } else
6236 tg3_dump_legacy_regs(tp, regs);
6237
6238 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6239 if (!regs[i + 0] && !regs[i + 1] &&
6240 !regs[i + 2] && !regs[i + 3])
6241 continue;
6242
6243 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6244 i * 4,
6245 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6246 }
6247
6248 kfree(regs);
6249
6250 for (i = 0; i < tp->irq_cnt; i++) {
6251 struct tg3_napi *tnapi = &tp->napi[i];
6252
6253 /* SW status block */
6254 netdev_err(tp->dev,
6255 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6256 i,
6257 tnapi->hw_status->status,
6258 tnapi->hw_status->status_tag,
6259 tnapi->hw_status->rx_jumbo_consumer,
6260 tnapi->hw_status->rx_consumer,
6261 tnapi->hw_status->rx_mini_consumer,
6262 tnapi->hw_status->idx[0].rx_producer,
6263 tnapi->hw_status->idx[0].tx_consumer);
6264
6265 netdev_err(tp->dev,
6266 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6267 i,
6268 tnapi->last_tag, tnapi->last_irq_tag,
6269 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6270 tnapi->rx_rcb_ptr,
6271 tnapi->prodring.rx_std_prod_idx,
6272 tnapi->prodring.rx_std_cons_idx,
6273 tnapi->prodring.rx_jmb_prod_idx,
6274 tnapi->prodring.rx_jmb_cons_idx);
6275 }
6276 }
6277
6278 /* This is called whenever we suspect that the system chipset is re-
6279 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6280 * is bogus tx completions. We try to recover by setting the
6281 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6282 * in the workqueue.
6283 */
6284 static void tg3_tx_recover(struct tg3 *tp)
6285 {
6286 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6287 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6288
6289 netdev_warn(tp->dev,
6290 "The system may be re-ordering memory-mapped I/O "
6291 "cycles to the network device, attempting to recover. "
6292 "Please report the problem to the driver maintainer "
6293 "and include system chipset information.\n");
6294
6295 spin_lock(&tp->lock);
6296 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6297 spin_unlock(&tp->lock);
6298 }
6299
6300 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6301 {
6302 /* Tell compiler to fetch tx indices from memory. */
6303 barrier();
6304 return tnapi->tx_pending -
6305 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6306 }
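/* Worked example (illustrative values): with TG3_TX_RING_SIZE = 512,
 * tx_pending = 511, tx_prod = 10 and tx_cons = 500, the in-flight
 * count is (10 - 500) & 511 = 22, so 511 - 22 = 489 BDs are free.
 */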
6307
6308 /* Tigon3 never reports partial packet sends. So we do not
6309 * need special logic to handle SKBs that have not had all
6310 * of their frags sent yet, like SunGEM does.
6311 */
6312 static void tg3_tx(struct tg3_napi *tnapi)
6313 {
6314 struct tg3 *tp = tnapi->tp;
6315 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6316 u32 sw_idx = tnapi->tx_cons;
6317 struct netdev_queue *txq;
6318 int index = tnapi - tp->napi;
6319 unsigned int pkts_compl = 0, bytes_compl = 0;
6320
6321 if (tg3_flag(tp, ENABLE_TSS))
6322 index--;
6323
6324 txq = netdev_get_tx_queue(tp->dev, index);
6325
6326 while (sw_idx != hw_idx) {
6327 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6328 struct sk_buff *skb = ri->skb;
6329 int i, tx_bug = 0;
6330
6331 if (unlikely(skb == NULL)) {
6332 tg3_tx_recover(tp);
6333 return;
6334 }
6335
6336 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6337 struct skb_shared_hwtstamps timestamp;
6338 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6339 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6340
6341 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6342
6343 skb_tstamp_tx(skb, &timestamp);
6344 }
6345
6346 pci_unmap_single(tp->pdev,
6347 dma_unmap_addr(ri, mapping),
6348 skb_headlen(skb),
6349 PCI_DMA_TODEVICE);
6350
6351 ri->skb = NULL;
6352
6353 while (ri->fragmented) {
6354 ri->fragmented = false;
6355 sw_idx = NEXT_TX(sw_idx);
6356 ri = &tnapi->tx_buffers[sw_idx];
6357 }
6358
6359 sw_idx = NEXT_TX(sw_idx);
6360
6361 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6362 ri = &tnapi->tx_buffers[sw_idx];
6363 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6364 tx_bug = 1;
6365
6366 pci_unmap_page(tp->pdev,
6367 dma_unmap_addr(ri, mapping),
6368 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6369 PCI_DMA_TODEVICE);
6370
6371 while (ri->fragmented) {
6372 ri->fragmented = false;
6373 sw_idx = NEXT_TX(sw_idx);
6374 ri = &tnapi->tx_buffers[sw_idx];
6375 }
6376
6377 sw_idx = NEXT_TX(sw_idx);
6378 }
6379
6380 pkts_compl++;
6381 bytes_compl += skb->len;
6382
6383 dev_kfree_skb(skb);
6384
6385 if (unlikely(tx_bug)) {
6386 tg3_tx_recover(tp);
6387 return;
6388 }
6389 }
6390
6391 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6392
6393 tnapi->tx_cons = sw_idx;
6394
6395 /* Need to make the tx_cons update visible to tg3_start_xmit()
6396 * before checking for netif_queue_stopped(). Without the
6397 * memory barrier, there is a small possibility that tg3_start_xmit()
6398 * will miss it and cause the queue to be stopped forever.
6399 */
6400 smp_mb();
6401
6402 if (unlikely(netif_tx_queue_stopped(txq) &&
6403 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6404 __netif_tx_lock(txq, smp_processor_id());
6405 if (netif_tx_queue_stopped(txq) &&
6406 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6407 netif_tx_wake_queue(txq);
6408 __netif_tx_unlock(txq);
6409 }
6410 }
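/* Annotation: the smp_mb() above pairs with the barrier in
 * tg3_start_xmit() after netif_tx_stop_queue(), so either the
 * producer sees the new tx_cons or this path sees the stopped queue;
 * the queue can never be left stopped with space available.
 */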
6411
6412 static void tg3_frag_free(bool is_frag, void *data)
6413 {
6414 if (is_frag)
6415 put_page(virt_to_head_page(data));
6416 else
6417 kfree(data);
6418 }
6419
6420 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6421 {
6422 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6423 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6424
6425 if (!ri->data)
6426 return;
6427
6428 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6429 map_sz, PCI_DMA_FROMDEVICE);
6430 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6431 ri->data = NULL;
6432 }
6433
6434
6435 /* Returns size of skb allocated or < 0 on error.
6436 *
6437 * We only need to fill in the address because the other members
6438 * of the RX descriptor are invariant, see tg3_init_rings.
6439 *
6440 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6441 * posting buffers we only dirty the first cache line of the RX
6442 * descriptor (containing the address), whereas for the RX status
6443 * buffers the cpu only reads the last cache line of the RX descriptor
6444 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6445 */
6446 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6447 u32 opaque_key, u32 dest_idx_unmasked,
6448 unsigned int *frag_size)
6449 {
6450 struct tg3_rx_buffer_desc *desc;
6451 struct ring_info *map;
6452 u8 *data;
6453 dma_addr_t mapping;
6454 int skb_size, data_size, dest_idx;
6455
6456 switch (opaque_key) {
6457 case RXD_OPAQUE_RING_STD:
6458 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6459 desc = &tpr->rx_std[dest_idx];
6460 map = &tpr->rx_std_buffers[dest_idx];
6461 data_size = tp->rx_pkt_map_sz;
6462 break;
6463
6464 case RXD_OPAQUE_RING_JUMBO:
6465 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6466 desc = &tpr->rx_jmb[dest_idx].std;
6467 map = &tpr->rx_jmb_buffers[dest_idx];
6468 data_size = TG3_RX_JMB_MAP_SZ;
6469 break;
6470
6471 default:
6472 return -EINVAL;
6473 }
6474
6475 /* Do not overwrite any of the map or rp information
6476 * until we are sure we can commit to a new buffer.
6477 *
6478 * Callers depend upon this behavior and assume that
6479 * we leave everything unchanged if we fail.
6480 */
6481 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6482 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6483 if (skb_size <= PAGE_SIZE) {
6484 data = netdev_alloc_frag(skb_size);
6485 *frag_size = skb_size;
6486 } else {
6487 data = kmalloc(skb_size, GFP_ATOMIC);
6488 *frag_size = 0;
6489 }
6490 if (!data)
6491 return -ENOMEM;
6492
6493 mapping = pci_map_single(tp->pdev,
6494 data + TG3_RX_OFFSET(tp),
6495 data_size,
6496 PCI_DMA_FROMDEVICE);
6497 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6498 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6499 return -EIO;
6500 }
6501
6502 map->data = data;
6503 dma_unmap_addr_set(map, mapping, mapping);
6504
6505 desc->addr_hi = ((u64)mapping >> 32);
6506 desc->addr_lo = ((u64)mapping & 0xffffffff);
6507
6508 return data_size;
6509 }
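/* Sizing sketch (illustrative, 4 KiB pages): a standard 1500-byte MTU
 * buffer plus TG3_RX_OFFSET() and the aligned skb_shared_info stays
 * under PAGE_SIZE, so the cheap netdev_alloc_frag() path is taken.
 * Jumbo buffers (TG3_RX_JMB_MAP_SZ) exceed PAGE_SIZE and fall back to
 * kmalloc(), which is signalled to build_skb() via *frag_size = 0.
 */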
6510
6511 /* We only need to move over in the address because the other
6512 * members of the RX descriptor are invariant. See notes above
6513 * tg3_alloc_rx_data for full details.
6514 */
6515 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6516 struct tg3_rx_prodring_set *dpr,
6517 u32 opaque_key, int src_idx,
6518 u32 dest_idx_unmasked)
6519 {
6520 struct tg3 *tp = tnapi->tp;
6521 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6522 struct ring_info *src_map, *dest_map;
6523 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6524 int dest_idx;
6525
6526 switch (opaque_key) {
6527 case RXD_OPAQUE_RING_STD:
6528 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6529 dest_desc = &dpr->rx_std[dest_idx];
6530 dest_map = &dpr->rx_std_buffers[dest_idx];
6531 src_desc = &spr->rx_std[src_idx];
6532 src_map = &spr->rx_std_buffers[src_idx];
6533 break;
6534
6535 case RXD_OPAQUE_RING_JUMBO:
6536 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6537 dest_desc = &dpr->rx_jmb[dest_idx].std;
6538 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6539 src_desc = &spr->rx_jmb[src_idx].std;
6540 src_map = &spr->rx_jmb_buffers[src_idx];
6541 break;
6542
6543 default:
6544 return;
6545 }
6546
6547 dest_map->data = src_map->data;
6548 dma_unmap_addr_set(dest_map, mapping,
6549 dma_unmap_addr(src_map, mapping));
6550 dest_desc->addr_hi = src_desc->addr_hi;
6551 dest_desc->addr_lo = src_desc->addr_lo;
6552
6553 /* Ensure that the update to the skb happens after the physical
6554 * addresses have been transferred to the new BD location.
6555 */
6556 smp_wmb();
6557
6558 src_map->data = NULL;
6559 }
6560
6561 /* The RX ring scheme is composed of multiple rings which post fresh
6562 * buffers to the chip, and one special ring the chip uses to report
6563 * status back to the host.
6564 *
6565 * The special ring reports the status of received packets to the
6566 * host. The chip does not write into the original descriptor the
6567 * RX buffer was obtained from. The chip simply takes the original
6568 * descriptor as provided by the host, updates the status and length
6569 * field, then writes this into the next status ring entry.
6570 *
6571 * Each ring the host uses to post buffers to the chip is described
6572 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
6573 * it is first placed into the on-chip ram. When the packet's length
6574 * is known, it walks down the TG3_BDINFO entries to select the ring.
6575 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6576 * which is within the range of the new packet's length is chosen.
6577 *
6578 * The "separate ring for rx status" scheme may sound queer, but it makes
6579 * sense from a cache coherency perspective. If only the host writes
6580 * to the buffer post rings, and only the chip writes to the rx status
6581 * rings, then cache lines never move beyond shared-modified state.
6582 * If both the host and chip were to write into the same ring, cache line
6583 * eviction could occur since both entities want it in an exclusive state.
6584 */
6585 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6586 {
6587 struct tg3 *tp = tnapi->tp;
6588 u32 work_mask, rx_std_posted = 0;
6589 u32 std_prod_idx, jmb_prod_idx;
6590 u32 sw_idx = tnapi->rx_rcb_ptr;
6591 u16 hw_idx;
6592 int received;
6593 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6594
6595 hw_idx = *(tnapi->rx_rcb_prod_idx);
6596 /*
6597 * We need to order the read of hw_idx and the read of
6598 * the opaque cookie.
6599 */
6600 rmb();
6601 work_mask = 0;
6602 received = 0;
6603 std_prod_idx = tpr->rx_std_prod_idx;
6604 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6605 while (sw_idx != hw_idx && budget > 0) {
6606 struct ring_info *ri;
6607 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6608 unsigned int len;
6609 struct sk_buff *skb;
6610 dma_addr_t dma_addr;
6611 u32 opaque_key, desc_idx, *post_ptr;
6612 u8 *data;
6613 u64 tstamp = 0;
6614
6615 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6616 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6617 if (opaque_key == RXD_OPAQUE_RING_STD) {
6618 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6619 dma_addr = dma_unmap_addr(ri, mapping);
6620 data = ri->data;
6621 post_ptr = &std_prod_idx;
6622 rx_std_posted++;
6623 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6624 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6625 dma_addr = dma_unmap_addr(ri, mapping);
6626 data = ri->data;
6627 post_ptr = &jmb_prod_idx;
6628 } else
6629 goto next_pkt_nopost;
6630
6631 work_mask |= opaque_key;
6632
6633 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6634 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6635 drop_it:
6636 tg3_recycle_rx(tnapi, tpr, opaque_key,
6637 desc_idx, *post_ptr);
6638 drop_it_no_recycle:
6639 /* Other statistics are tracked by the card. */
6640 tp->rx_dropped++;
6641 goto next_pkt;
6642 }
6643
6644 prefetch(data + TG3_RX_OFFSET(tp));
6645 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6646 ETH_FCS_LEN;
6647
6648 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6649 RXD_FLAG_PTPSTAT_PTPV1 ||
6650 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6651 RXD_FLAG_PTPSTAT_PTPV2) {
6652 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6653 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6654 }
6655
6656 if (len > TG3_RX_COPY_THRESH(tp)) {
6657 int skb_size;
6658 unsigned int frag_size;
6659
6660 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6661 *post_ptr, &frag_size);
6662 if (skb_size < 0)
6663 goto drop_it;
6664
6665 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6666 PCI_DMA_FROMDEVICE);
6667
6668 skb = build_skb(data, frag_size);
6669 if (!skb) {
6670 tg3_frag_free(frag_size != 0, data);
6671 goto drop_it_no_recycle;
6672 }
6673 skb_reserve(skb, TG3_RX_OFFSET(tp));
6674 /* Ensure that the update to the data happens
6675 * after the usage of the old DMA mapping.
6676 */
6677 smp_wmb();
6678
6679 ri->data = NULL;
6680
6681 } else {
6682 tg3_recycle_rx(tnapi, tpr, opaque_key,
6683 desc_idx, *post_ptr);
6684
6685 skb = netdev_alloc_skb(tp->dev,
6686 len + TG3_RAW_IP_ALIGN);
6687 if (skb == NULL)
6688 goto drop_it_no_recycle;
6689
6690 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6691 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6692 memcpy(skb->data,
6693 data + TG3_RX_OFFSET(tp),
6694 len);
6695 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6696 }
6697
6698 skb_put(skb, len);
6699 if (tstamp)
6700 tg3_hwclock_to_timestamp(tp, tstamp,
6701 skb_hwtstamps(skb));
6702
6703 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6704 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6705 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6706 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6707 skb->ip_summed = CHECKSUM_UNNECESSARY;
6708 else
6709 skb_checksum_none_assert(skb);
6710
6711 skb->protocol = eth_type_trans(skb, tp->dev);
6712
6713 if (len > (tp->dev->mtu + ETH_HLEN) &&
6714 skb->protocol != htons(ETH_P_8021Q)) {
6715 dev_kfree_skb(skb);
6716 goto drop_it_no_recycle;
6717 }
6718
6719 if (desc->type_flags & RXD_FLAG_VLAN &&
6720 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6721 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6722 desc->err_vlan & RXD_VLAN_MASK);
6723
6724 napi_gro_receive(&tnapi->napi, skb);
6725
6726 received++;
6727 budget--;
6728
6729 next_pkt:
6730 (*post_ptr)++;
6731
6732 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6733 tpr->rx_std_prod_idx = std_prod_idx &
6734 tp->rx_std_ring_mask;
6735 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6736 tpr->rx_std_prod_idx);
6737 work_mask &= ~RXD_OPAQUE_RING_STD;
6738 rx_std_posted = 0;
6739 }
6740 next_pkt_nopost:
6741 sw_idx++;
6742 sw_idx &= tp->rx_ret_ring_mask;
6743
6744 /* Refresh hw_idx to see if there is new work */
6745 if (sw_idx == hw_idx) {
6746 hw_idx = *(tnapi->rx_rcb_prod_idx);
6747 rmb();
6748 }
6749 }
6750
6751 /* ACK the status ring. */
6752 tnapi->rx_rcb_ptr = sw_idx;
6753 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6754
6755 /* Refill RX ring(s). */
6756 if (!tg3_flag(tp, ENABLE_RSS)) {
6757 /* Sync BD data before updating mailbox */
6758 wmb();
6759
6760 if (work_mask & RXD_OPAQUE_RING_STD) {
6761 tpr->rx_std_prod_idx = std_prod_idx &
6762 tp->rx_std_ring_mask;
6763 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6764 tpr->rx_std_prod_idx);
6765 }
6766 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6767 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6768 tp->rx_jmb_ring_mask;
6769 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6770 tpr->rx_jmb_prod_idx);
6771 }
6772 mmiowb();
6773 } else if (work_mask) {
6774 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6775 * updated before the producer indices can be updated.
6776 */
6777 smp_wmb();
6778
6779 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6780 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6781
6782 if (tnapi != &tp->napi[1]) {
6783 tp->rx_refill = true;
6784 napi_schedule(&tp->napi[1].napi);
6785 }
6786 }
6787
6788 return received;
6789 }
6790
6791 static void tg3_poll_link(struct tg3 *tp)
6792 {
6793 /* handle link change and other phy events */
6794 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6795 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6796
6797 if (sblk->status & SD_STATUS_LINK_CHG) {
6798 sblk->status = SD_STATUS_UPDATED |
6799 (sblk->status & ~SD_STATUS_LINK_CHG);
6800 spin_lock(&tp->lock);
6801 if (tg3_flag(tp, USE_PHYLIB)) {
6802 tw32_f(MAC_STATUS,
6803 (MAC_STATUS_SYNC_CHANGED |
6804 MAC_STATUS_CFG_CHANGED |
6805 MAC_STATUS_MI_COMPLETION |
6806 MAC_STATUS_LNKSTATE_CHANGED));
6807 udelay(40);
6808 } else
6809 tg3_setup_phy(tp, false);
6810 spin_unlock(&tp->lock);
6811 }
6812 }
6813 }
6814
6815 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6816 struct tg3_rx_prodring_set *dpr,
6817 struct tg3_rx_prodring_set *spr)
6818 {
6819 u32 si, di, cpycnt, src_prod_idx;
6820 int i, err = 0;
6821
6822 while (1) {
6823 src_prod_idx = spr->rx_std_prod_idx;
6824
6825 /* Make sure updates to the rx_std_buffers[] entries and the
6826 * standard producer index are seen in the correct order.
6827 */
6828 smp_rmb();
6829
6830 if (spr->rx_std_cons_idx == src_prod_idx)
6831 break;
6832
6833 if (spr->rx_std_cons_idx < src_prod_idx)
6834 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6835 else
6836 cpycnt = tp->rx_std_ring_mask + 1 -
6837 spr->rx_std_cons_idx;
6838
6839 cpycnt = min(cpycnt,
6840 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6841
6842 si = spr->rx_std_cons_idx;
6843 di = dpr->rx_std_prod_idx;
6844
6845 for (i = di; i < di + cpycnt; i++) {
6846 if (dpr->rx_std_buffers[i].data) {
6847 cpycnt = i - di;
6848 err = -ENOSPC;
6849 break;
6850 }
6851 }
6852
6853 if (!cpycnt)
6854 break;
6855
6856 /* Ensure that updates to the rx_std_buffers ring and the
6857 * shadowed hardware producer ring from tg3_recycle_skb() are
6858 * ordered correctly WRT the skb check above.
6859 */
6860 smp_rmb();
6861
6862 memcpy(&dpr->rx_std_buffers[di],
6863 &spr->rx_std_buffers[si],
6864 cpycnt * sizeof(struct ring_info));
6865
6866 for (i = 0; i < cpycnt; i++, di++, si++) {
6867 struct tg3_rx_buffer_desc *sbd, *dbd;
6868 sbd = &spr->rx_std[si];
6869 dbd = &dpr->rx_std[di];
6870 dbd->addr_hi = sbd->addr_hi;
6871 dbd->addr_lo = sbd->addr_lo;
6872 }
6873
6874 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6875 tp->rx_std_ring_mask;
6876 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6877 tp->rx_std_ring_mask;
6878 }
6879
6880 while (1) {
6881 src_prod_idx = spr->rx_jmb_prod_idx;
6882
6883 /* Make sure updates to the rx_jmb_buffers[] entries and
6884 * the jumbo producer index are seen in the correct order.
6885 */
6886 smp_rmb();
6887
6888 if (spr->rx_jmb_cons_idx == src_prod_idx)
6889 break;
6890
6891 if (spr->rx_jmb_cons_idx < src_prod_idx)
6892 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6893 else
6894 cpycnt = tp->rx_jmb_ring_mask + 1 -
6895 spr->rx_jmb_cons_idx;
6896
6897 cpycnt = min(cpycnt,
6898 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6899
6900 si = spr->rx_jmb_cons_idx;
6901 di = dpr->rx_jmb_prod_idx;
6902
6903 for (i = di; i < di + cpycnt; i++) {
6904 if (dpr->rx_jmb_buffers[i].data) {
6905 cpycnt = i - di;
6906 err = -ENOSPC;
6907 break;
6908 }
6909 }
6910
6911 if (!cpycnt)
6912 break;
6913
6914 /* Ensure that updates to the rx_jmb_buffers ring and the
6915 * shadowed hardware producer ring from tg3_recycle_skb() are
6916 * ordered correctly WRT the skb check above.
6917 */
6918 smp_rmb();
6919
6920 memcpy(&dpr->rx_jmb_buffers[di],
6921 &spr->rx_jmb_buffers[si],
6922 cpycnt * sizeof(struct ring_info));
6923
6924 for (i = 0; i < cpycnt; i++, di++, si++) {
6925 struct tg3_rx_buffer_desc *sbd, *dbd;
6926 sbd = &spr->rx_jmb[si].std;
6927 dbd = &dpr->rx_jmb[di].std;
6928 dbd->addr_hi = sbd->addr_hi;
6929 dbd->addr_lo = sbd->addr_lo;
6930 }
6931
6932 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6933 tp->rx_jmb_ring_mask;
6934 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6935 tp->rx_jmb_ring_mask;
6936 }
6937
6938 return err;
6939 }
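/* Copy-count sketch (illustrative values): with a ring mask of 511,
 * cons_idx = 500 and prod_idx = 10, the producer has wrapped, so the
 * first pass moves cpycnt = 512 - 500 = 12 entries; the remaining 10
 * are picked up by the next iteration of the while loop.
 */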
6940
6941 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6942 {
6943 struct tg3 *tp = tnapi->tp;
6944
6945 /* run TX completion thread */
6946 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6947 tg3_tx(tnapi);
6948 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6949 return work_done;
6950 }
6951
6952 if (!tnapi->rx_rcb_prod_idx)
6953 return work_done;
6954
6955 /* run RX thread, within the bounds set by NAPI.
6956 * All RX "locking" is done by ensuring outside
6957 * code synchronizes with tg3->napi.poll()
6958 */
6959 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6960 work_done += tg3_rx(tnapi, budget - work_done);
6961
6962 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6963 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6964 int i, err = 0;
6965 u32 std_prod_idx = dpr->rx_std_prod_idx;
6966 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6967
6968 tp->rx_refill = false;
6969 for (i = 1; i <= tp->rxq_cnt; i++)
6970 err |= tg3_rx_prodring_xfer(tp, dpr,
6971 &tp->napi[i].prodring);
6972
6973 wmb();
6974
6975 if (std_prod_idx != dpr->rx_std_prod_idx)
6976 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6977 dpr->rx_std_prod_idx);
6978
6979 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6980 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6981 dpr->rx_jmb_prod_idx);
6982
6983 mmiowb();
6984
6985 if (err)
6986 tw32_f(HOSTCC_MODE, tp->coal_now);
6987 }
6988
6989 return work_done;
6990 }
6991
6992 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6993 {
6994 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6995 schedule_work(&tp->reset_task);
6996 }
6997
6998 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6999 {
7000 cancel_work_sync(&tp->reset_task);
7001 tg3_flag_clear(tp, RESET_TASK_PENDING);
7002 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7003 }
7004
7005 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7006 {
7007 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7008 struct tg3 *tp = tnapi->tp;
7009 int work_done = 0;
7010 struct tg3_hw_status *sblk = tnapi->hw_status;
7011
7012 while (1) {
7013 work_done = tg3_poll_work(tnapi, work_done, budget);
7014
7015 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7016 goto tx_recovery;
7017
7018 if (unlikely(work_done >= budget))
7019 break;
7020
7021 /* tnapi->last_tag is written to the interrupt mailbox
7022 * below to tell the hw how much work has been processed,
7023 * so we must read it before checking for more work.
7024 */
7025 tnapi->last_tag = sblk->status_tag;
7026 tnapi->last_irq_tag = tnapi->last_tag;
7027 rmb();
7028
7029 /* check for RX/TX work to do */
7030 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7031 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7032
7033 /* This test is not race-free, but looping again
7034 * here reduces the number of interrupts taken.
7035 */
7036 if (tnapi == &tp->napi[1] && tp->rx_refill)
7037 continue;
7038
7039 napi_complete(napi);
7040 /* Reenable interrupts. */
7041 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7042
7043 /* This test here is synchronized by napi_schedule()
7044 * and napi_complete() to close the race condition.
7045 */
7046 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7047 tw32(HOSTCC_MODE, tp->coalesce_mode |
7048 HOSTCC_MODE_ENABLE |
7049 tnapi->coal_now);
7050 }
7051 mmiowb();
7052 break;
7053 }
7054 }
7055
7056 return work_done;
7057
7058 tx_recovery:
7059 /* work_done is guaranteed to be less than budget. */
7060 napi_complete(napi);
7061 tg3_reset_task_schedule(tp);
7062 return work_done;
7063 }
7064
7065 static void tg3_process_error(struct tg3 *tp)
7066 {
7067 u32 val;
7068 bool real_error = false;
7069
7070 if (tg3_flag(tp, ERROR_PROCESSED))
7071 return;
7072
7073 /* Check Flow Attention register */
7074 val = tr32(HOSTCC_FLOW_ATTN);
7075 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7076 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7077 real_error = true;
7078 }
7079
7080 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7081 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7082 real_error = true;
7083 }
7084
7085 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7086 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7087 real_error = true;
7088 }
7089
7090 if (!real_error)
7091 return;
7092
7093 tg3_dump_state(tp);
7094
7095 tg3_flag_set(tp, ERROR_PROCESSED);
7096 tg3_reset_task_schedule(tp);
7097 }
7098
7099 static int tg3_poll(struct napi_struct *napi, int budget)
7100 {
7101 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7102 struct tg3 *tp = tnapi->tp;
7103 int work_done = 0;
7104 struct tg3_hw_status *sblk = tnapi->hw_status;
7105
7106 while (1) {
7107 if (sblk->status & SD_STATUS_ERROR)
7108 tg3_process_error(tp);
7109
7110 tg3_poll_link(tp);
7111
7112 work_done = tg3_poll_work(tnapi, work_done, budget);
7113
7114 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7115 goto tx_recovery;
7116
7117 if (unlikely(work_done >= budget))
7118 break;
7119
7120 if (tg3_flag(tp, TAGGED_STATUS)) {
7121 /* tnapi->last_tag is used in tg3_int_reenable() below
7122 * to tell the hw how much work has been processed,
7123 * so we must read it before checking for more work.
7124 */
7125 tnapi->last_tag = sblk->status_tag;
7126 tnapi->last_irq_tag = tnapi->last_tag;
7127 rmb();
7128 } else
7129 sblk->status &= ~SD_STATUS_UPDATED;
7130
7131 if (likely(!tg3_has_work(tnapi))) {
7132 napi_complete(napi);
7133 tg3_int_reenable(tnapi);
7134 break;
7135 }
7136 }
7137
7138 return work_done;
7139
7140 tx_recovery:
7141 /* work_done is guaranteed to be less than budget. */
7142 napi_complete(napi);
7143 tg3_reset_task_schedule(tp);
7144 return work_done;
7145 }
7146
7147 static void tg3_napi_disable(struct tg3 *tp)
7148 {
7149 int i;
7150
7151 for (i = tp->irq_cnt - 1; i >= 0; i--)
7152 napi_disable(&tp->napi[i].napi);
7153 }
7154
7155 static void tg3_napi_enable(struct tg3 *tp)
7156 {
7157 int i;
7158
7159 for (i = 0; i < tp->irq_cnt; i++)
7160 napi_enable(&tp->napi[i].napi);
7161 }
7162
7163 static void tg3_napi_init(struct tg3 *tp)
7164 {
7165 int i;
7166
7167 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7168 for (i = 1; i < tp->irq_cnt; i++)
7169 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7170 }
7171
7172 static void tg3_napi_fini(struct tg3 *tp)
7173 {
7174 int i;
7175
7176 for (i = 0; i < tp->irq_cnt; i++)
7177 netif_napi_del(&tp->napi[i].napi);
7178 }
7179
7180 static inline void tg3_netif_stop(struct tg3 *tp)
7181 {
7182 tp->dev->trans_start = jiffies; /* prevent tx timeout */
7183 tg3_napi_disable(tp);
7184 netif_carrier_off(tp->dev);
7185 netif_tx_disable(tp->dev);
7186 }
7187
7188 /* tp->lock must be held */
7189 static inline void tg3_netif_start(struct tg3 *tp)
7190 {
7191 tg3_ptp_resume(tp);
7192
7193 /* NOTE: unconditional netif_tx_wake_all_queues is only
7194 * appropriate so long as all callers are assured to
7195 * have free tx slots (such as after tg3_init_hw)
7196 */
7197 netif_tx_wake_all_queues(tp->dev);
7198
7199 if (tp->link_up)
7200 netif_carrier_on(tp->dev);
7201
7202 tg3_napi_enable(tp);
7203 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7204 tg3_enable_ints(tp);
7205 }
7206
7207 static void tg3_irq_quiesce(struct tg3 *tp)
7208 {
7209 int i;
7210
7211 BUG_ON(tp->irq_sync);
7212
7213 tp->irq_sync = 1;
7214 smp_mb();
7215
7216 for (i = 0; i < tp->irq_cnt; i++)
7217 synchronize_irq(tp->napi[i].irq_vec);
7218 }
7219
7220 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7221 * If irq_sync is non-zero, then the IRQ handler must be synchronized
7222 * with as well. Most of the time, this is not necessary except when
7223 * shutting down the device.
7224 */
7225 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7226 {
7227 spin_lock_bh(&tp->lock);
7228 if (irq_sync)
7229 tg3_irq_quiesce(tp);
7230 }
7231
7232 static inline void tg3_full_unlock(struct tg3 *tp)
7233 {
7234 spin_unlock_bh(&tp->lock);
7235 }
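/* Usage sketch (annotation):
 *
 *	tg3_full_lock(tp, 1);	// irq_sync != 0 also quiesces IRQs
 *	...reconfigure the chip...
 *	tg3_full_unlock(tp);
 *
 * Passing irq_sync = 0 takes only tp->lock, which suffices on paths
 * that cannot race with the interrupt handlers.
 */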
7236
7237 /* One-shot MSI handler - Chip automatically disables interrupt
7238 * after sending MSI so driver doesn't have to do it.
7239 */
7240 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7241 {
7242 struct tg3_napi *tnapi = dev_id;
7243 struct tg3 *tp = tnapi->tp;
7244
7245 prefetch(tnapi->hw_status);
7246 if (tnapi->rx_rcb)
7247 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7248
7249 if (likely(!tg3_irq_sync(tp)))
7250 napi_schedule(&tnapi->napi);
7251
7252 return IRQ_HANDLED;
7253 }
7254
7255 /* MSI ISR - No need to check for interrupt sharing and no need to
7256 * flush status block and interrupt mailbox. PCI ordering rules
7257 * guarantee that MSI will arrive after the status block.
7258 */
7259 static irqreturn_t tg3_msi(int irq, void *dev_id)
7260 {
7261 struct tg3_napi *tnapi = dev_id;
7262 struct tg3 *tp = tnapi->tp;
7263
7264 prefetch(tnapi->hw_status);
7265 if (tnapi->rx_rcb)
7266 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7267 /*
7268 * Writing any value to intr-mbox-0 clears PCI INTA# and
7269 * chip-internal interrupt pending events.
7270 * Writing non-zero to intr-mbox-0 additionally tells the
7271 * NIC to stop sending us irqs, engaging "in-intr-handler"
7272 * event coalescing.
7273 */
7274 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7275 if (likely(!tg3_irq_sync(tp)))
7276 napi_schedule(&tnapi->napi);
7277
7278 return IRQ_RETVAL(1);
7279 }
7280
7281 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7282 {
7283 struct tg3_napi *tnapi = dev_id;
7284 struct tg3 *tp = tnapi->tp;
7285 struct tg3_hw_status *sblk = tnapi->hw_status;
7286 unsigned int handled = 1;
7287
7288 /* In INTx mode, it is possible for the interrupt to arrive at
7289 * the CPU before the status block that was posted prior to it.
7290 * Reading the PCI State register will confirm whether the
7291 * interrupt is ours and will flush the status block.
7292 */
7293 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7294 if (tg3_flag(tp, CHIP_RESETTING) ||
7295 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7296 handled = 0;
7297 goto out;
7298 }
7299 }
7300
7301 /*
7302 * Writing any value to intr-mbox-0 clears PCI INTA# and
7303 * chip-internal interrupt pending events.
7304 * Writing non-zero to intr-mbox-0 additionally tells the
7305 * NIC to stop sending us irqs, engaging "in-intr-handler"
7306 * event coalescing.
7307 *
7308 * Flush the mailbox to de-assert the IRQ immediately to prevent
7309 * spurious interrupts. The flush impacts performance but
7310 * excessive spurious interrupts can be worse in some cases.
7311 */
7312 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7313 if (tg3_irq_sync(tp))
7314 goto out;
7315 sblk->status &= ~SD_STATUS_UPDATED;
7316 if (likely(tg3_has_work(tnapi))) {
7317 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7318 napi_schedule(&tnapi->napi);
7319 } else {
7320 /* No work, shared interrupt perhaps? re-enable
7321 * interrupts, and flush that PCI write
7322 */
7323 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7324 0x00000000);
7325 }
7326 out:
7327 return IRQ_RETVAL(handled);
7328 }
7329
7330 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7331 {
7332 struct tg3_napi *tnapi = dev_id;
7333 struct tg3 *tp = tnapi->tp;
7334 struct tg3_hw_status *sblk = tnapi->hw_status;
7335 unsigned int handled = 1;
7336
7337 /* In INTx mode, it is possible for the interrupt to arrive at
7338 * the CPU before the status block that was posted prior to it.
7339 * Reading the PCI State register will confirm whether the
7340 * interrupt is ours and will flush the status block.
7341 */
7342 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7343 if (tg3_flag(tp, CHIP_RESETTING) ||
7344 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7345 handled = 0;
7346 goto out;
7347 }
7348 }
7349
7350 /*
7351 * Writing any value to intr-mbox-0 clears PCI INTA# and
7352 * chip-internal interrupt pending events.
7353 * Writing non-zero to intr-mbox-0 additionally tells the
7354 * NIC to stop sending us irqs, engaging "in-intr-handler"
7355 * event coalescing.
7356 *
7357 * Flush the mailbox to de-assert the IRQ immediately to prevent
7358 * spurious interrupts. The flush impacts performance but
7359 * excessive spurious interrupts can be worse in some cases.
7360 */
7361 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7362
7363 /*
7364 * In a shared interrupt configuration, sometimes other devices'
7365 * interrupts will scream. We record the current status tag here
7366 * so that the above check can report that the screaming interrupts
7367 * are unhandled. Eventually they will be silenced.
7368 */
7369 tnapi->last_irq_tag = sblk->status_tag;
7370
7371 if (tg3_irq_sync(tp))
7372 goto out;
7373
7374 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7375
7376 napi_schedule(&tnapi->napi);
7377
7378 out:
7379 return IRQ_RETVAL(handled);
7380 }
7381
7382 /* ISR for interrupt test */
7383 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7384 {
7385 struct tg3_napi *tnapi = dev_id;
7386 struct tg3 *tp = tnapi->tp;
7387 struct tg3_hw_status *sblk = tnapi->hw_status;
7388
7389 if ((sblk->status & SD_STATUS_UPDATED) ||
7390 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7391 tg3_disable_ints(tp);
7392 return IRQ_RETVAL(1);
7393 }
7394 return IRQ_RETVAL(0);
7395 }
7396
7397 #ifdef CONFIG_NET_POLL_CONTROLLER
7398 static void tg3_poll_controller(struct net_device *dev)
7399 {
7400 int i;
7401 struct tg3 *tp = netdev_priv(dev);
7402
7403 if (tg3_irq_sync(tp))
7404 return;
7405
7406 for (i = 0; i < tp->irq_cnt; i++)
7407 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7408 }
7409 #endif
7410
7411 static void tg3_tx_timeout(struct net_device *dev)
7412 {
7413 struct tg3 *tp = netdev_priv(dev);
7414
7415 if (netif_msg_tx_err(tp)) {
7416 netdev_err(dev, "transmit timed out, resetting\n");
7417 tg3_dump_state(tp);
7418 }
7419
7420 tg3_reset_task_schedule(tp);
7421 }
7422
7423 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7424 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7425 {
7426 u32 base = (u32) mapping & 0xffffffff;
7427
7428 return (base > 0xffffdcc0) && (base + len + 8 < base);
7429 }
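/* Worked example (illustrative values): for base = 0xfffff000 and
 * len = 0x2000, base + len + 8 wraps to 0x00001008 < base, so the
 * buffer straddles a 4 GB boundary and tg3_tx_frag_set() must flag
 * it as a hwbug.
 */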
7430
7431 /* Test for DMA addresses > 40-bit */
7432 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7433 int len)
7434 {
7435 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7436 if (tg3_flag(tp, 40BIT_DMA_BUG))
7437 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7438 return 0;
7439 #else
7440 return 0;
7441 #endif
7442 }
7443
7444 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7445 dma_addr_t mapping, u32 len, u32 flags,
7446 u32 mss, u32 vlan)
7447 {
7448 txbd->addr_hi = ((u64) mapping >> 32);
7449 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7450 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7451 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7452 }
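/* Packing sketch (illustrative values): a 1448-byte TSO segment with
 * mss = 1448 and VLAN id 100 is laid out as
 *	len_flags = (1448 << TXD_LEN_SHIFT) | flags
 *	vlan_tag  = (1448 << TXD_MSS_SHIFT) | (100 << TXD_VLAN_TAG_SHIFT)
 * i.e. length and flags share one 32-bit word, mss and vlan the other.
 */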
7453
7454 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7455 dma_addr_t map, u32 len, u32 flags,
7456 u32 mss, u32 vlan)
7457 {
7458 struct tg3 *tp = tnapi->tp;
7459 bool hwbug = false;
7460
7461 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7462 hwbug = true;
7463
7464 if (tg3_4g_overflow_test(map, len))
7465 hwbug = true;
7466
7467 if (tg3_40bit_overflow_test(tp, map, len))
7468 hwbug = true;
7469
7470 if (tp->dma_limit) {
7471 u32 prvidx = *entry;
7472 u32 tmp_flag = flags & ~TXD_FLAG_END;
7473 while (len > tp->dma_limit && *budget) {
7474 u32 frag_len = tp->dma_limit;
7475 len -= tp->dma_limit;
7476
7477 /* Avoid the 8-byte DMA problem */
7478 if (len <= 8) {
7479 len += tp->dma_limit / 2;
7480 frag_len = tp->dma_limit / 2;
7481 }
7482
7483 tnapi->tx_buffers[*entry].fragmented = true;
7484
7485 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7486 frag_len, tmp_flag, mss, vlan);
7487 *budget -= 1;
7488 prvidx = *entry;
7489 *entry = NEXT_TX(*entry);
7490
7491 map += frag_len;
7492 }
7493
7494 if (len) {
7495 if (*budget) {
7496 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7497 len, flags, mss, vlan);
7498 *budget -= 1;
7499 *entry = NEXT_TX(*entry);
7500 } else {
7501 hwbug = true;
7502 tnapi->tx_buffers[prvidx].fragmented = false;
7503 }
7504 }
7505 } else {
7506 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7507 len, flags, mss, vlan);
7508 *entry = NEXT_TX(*entry);
7509 }
7510
7511 return hwbug;
7512 }
7513
7514 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7515 {
7516 int i;
7517 struct sk_buff *skb;
7518 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7519
7520 skb = txb->skb;
7521 txb->skb = NULL;
7522
7523 pci_unmap_single(tnapi->tp->pdev,
7524 dma_unmap_addr(txb, mapping),
7525 skb_headlen(skb),
7526 PCI_DMA_TODEVICE);
7527
7528 while (txb->fragmented) {
7529 txb->fragmented = false;
7530 entry = NEXT_TX(entry);
7531 txb = &tnapi->tx_buffers[entry];
7532 }
7533
7534 for (i = 0; i <= last; i++) {
7535 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7536
7537 entry = NEXT_TX(entry);
7538 txb = &tnapi->tx_buffers[entry];
7539
7540 pci_unmap_page(tnapi->tp->pdev,
7541 dma_unmap_addr(txb, mapping),
7542 skb_frag_size(frag), PCI_DMA_TODEVICE);
7543
7544 while (txb->fragmented) {
7545 txb->fragmented = false;
7546 entry = NEXT_TX(entry);
7547 txb = &tnapi->tx_buffers[entry];
7548 }
7549 }
7550 }
7551
7552 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7553 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7554 struct sk_buff **pskb,
7555 u32 *entry, u32 *budget,
7556 u32 base_flags, u32 mss, u32 vlan)
7557 {
7558 struct tg3 *tp = tnapi->tp;
7559 struct sk_buff *new_skb, *skb = *pskb;
7560 dma_addr_t new_addr = 0;
7561 int ret = 0;
7562
7563 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7564 new_skb = skb_copy(skb, GFP_ATOMIC);
7565 else {
7566 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7567
7568 new_skb = skb_copy_expand(skb,
7569 skb_headroom(skb) + more_headroom,
7570 skb_tailroom(skb), GFP_ATOMIC);
7571 }
7572
7573 if (!new_skb) {
7574 ret = -1;
7575 } else {
7576 /* New SKB is guaranteed to be linear. */
7577 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7578 PCI_DMA_TODEVICE);
7579 /* Make sure the mapping succeeded */
7580 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7581 dev_kfree_skb(new_skb);
7582 ret = -1;
7583 } else {
7584 u32 save_entry = *entry;
7585
7586 base_flags |= TXD_FLAG_END;
7587
7588 tnapi->tx_buffers[*entry].skb = new_skb;
7589 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7590 mapping, new_addr);
7591
7592 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7593 new_skb->len, base_flags,
7594 mss, vlan)) {
7595 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7596 dev_kfree_skb(new_skb);
7597 ret = -1;
7598 }
7599 }
7600 }
7601
7602 dev_kfree_skb(skb);
7603 *pskb = new_skb;
7604 return ret;
7605 }
7606
7607 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7608
7609 /* Use GSO to work around a rare TSO bug that may be triggered when the
7610 * TSO header is greater than 80 bytes.
7611 */
7612 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7613 {
7614 struct sk_buff *segs, *nskb;
7615 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7616
7617 /* Estimate the number of fragments in the worst case */
7618 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7619 netif_stop_queue(tp->dev);
7620
7621 /* netif_tx_stop_queue() must be done before checking
7622 * tx index in tg3_tx_avail() below, because in
7623 * tg3_tx(), we update tx index before checking for
7624 * netif_tx_queue_stopped().
7625 */
7626 smp_mb();
7627 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7628 return NETDEV_TX_BUSY;
7629
7630 netif_wake_queue(tp->dev);
7631 }
7632
7633 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7634 if (IS_ERR(segs))
7635 goto tg3_tso_bug_end;
7636
7637 do {
7638 nskb = segs;
7639 segs = segs->next;
7640 nskb->next = NULL;
7641 tg3_start_xmit(nskb, tp->dev);
7642 } while (segs);
7643
7644 tg3_tso_bug_end:
7645 dev_kfree_skb(skb);
7646
7647 return NETDEV_TX_OK;
7648 }
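/* Annotation: skb_gso_segment() with TSO masked out of the feature
 * set hands back a chain of already-segmented skbs, and each one is
 * fed through tg3_start_xmit() as an ordinary (non-TSO) transmit.
 */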
7649
7650 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7651 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7652 */
7653 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7654 {
7655 struct tg3 *tp = netdev_priv(dev);
7656 u32 len, entry, base_flags, mss, vlan = 0;
7657 u32 budget;
7658 int i = -1, would_hit_hwbug;
7659 dma_addr_t mapping;
7660 struct tg3_napi *tnapi;
7661 struct netdev_queue *txq;
7662 unsigned int last;
7663
7664 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7665 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7666 if (tg3_flag(tp, ENABLE_TSS))
7667 tnapi++;
7668
7669 budget = tg3_tx_avail(tnapi);
7670
7671 /* We are running in BH disabled context with netif_tx_lock
7672 * and TX reclaim runs via tp->napi.poll inside of a software
7673 * interrupt. Furthermore, IRQ processing runs lockless so we have
7674 * no IRQ context deadlocks to worry about either. Rejoice!
7675 */
7676 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7677 if (!netif_tx_queue_stopped(txq)) {
7678 netif_tx_stop_queue(txq);
7679
7680 /* This is a hard error, log it. */
7681 netdev_err(dev,
7682 "BUG! Tx Ring full when queue awake!\n");
7683 }
7684 return NETDEV_TX_BUSY;
7685 }
7686
7687 entry = tnapi->tx_prod;
7688 base_flags = 0;
7689 if (skb->ip_summed == CHECKSUM_PARTIAL)
7690 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7691
7692 mss = skb_shinfo(skb)->gso_size;
7693 if (mss) {
7694 struct iphdr *iph;
7695 u32 tcp_opt_len, hdr_len;
7696
7697 if (skb_header_cloned(skb) &&
7698 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7699 goto drop;
7700
7701 iph = ip_hdr(skb);
7702 tcp_opt_len = tcp_optlen(skb);
7703
7704 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7705
7706 if (!skb_is_gso_v6(skb)) {
7707 iph->check = 0;
7708 iph->tot_len = htons(mss + hdr_len);
7709 }
7710
7711 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7712 tg3_flag(tp, TSO_BUG))
7713 return tg3_tso_bug(tp, skb);
7714
7715 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7716 TXD_FLAG_CPU_POST_DMA);
7717
7718 if (tg3_flag(tp, HW_TSO_1) ||
7719 tg3_flag(tp, HW_TSO_2) ||
7720 tg3_flag(tp, HW_TSO_3)) {
7721 tcp_hdr(skb)->check = 0;
7722 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7723 } else
7724 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7725 iph->daddr, 0,
7726 IPPROTO_TCP,
7727 0);
7728
7729 if (tg3_flag(tp, HW_TSO_3)) {
7730 mss |= (hdr_len & 0xc) << 12;
7731 if (hdr_len & 0x10)
7732 base_flags |= 0x00000010;
7733 base_flags |= (hdr_len & 0x3e0) << 5;
7734 } else if (tg3_flag(tp, HW_TSO_2))
7735 mss |= hdr_len << 9;
7736 else if (tg3_flag(tp, HW_TSO_1) ||
7737 tg3_asic_rev(tp) == ASIC_REV_5705) {
7738 if (tcp_opt_len || iph->ihl > 5) {
7739 int tsflags;
7740
7741 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7742 mss |= (tsflags << 11);
7743 }
7744 } else {
7745 if (tcp_opt_len || iph->ihl > 5) {
7746 int tsflags;
7747
7748 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7749 base_flags |= tsflags << 12;
7750 }
7751 }
7752 }
7753
7754 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7755 !mss && skb->len > VLAN_ETH_FRAME_LEN)
7756 base_flags |= TXD_FLAG_JMB_PKT;
7757
7758 if (vlan_tx_tag_present(skb)) {
7759 base_flags |= TXD_FLAG_VLAN;
7760 vlan = vlan_tx_tag_get(skb);
7761 }
7762
7763 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
7764 tg3_flag(tp, TX_TSTAMP_EN)) {
7765 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7766 base_flags |= TXD_FLAG_HWTSTAMP;
7767 }
7768
7769 len = skb_headlen(skb);
7770
7771 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7772 if (pci_dma_mapping_error(tp->pdev, mapping))
7773 goto drop;
7774
7775
7776 tnapi->tx_buffers[entry].skb = skb;
7777 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7778
7779 would_hit_hwbug = 0;
7780
7781 if (tg3_flag(tp, 5701_DMA_BUG))
7782 would_hit_hwbug = 1;
7783
7784 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7785 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7786 mss, vlan)) {
7787 would_hit_hwbug = 1;
7788 } else if (skb_shinfo(skb)->nr_frags > 0) {
7789 u32 tmp_mss = mss;
7790
7791 if (!tg3_flag(tp, HW_TSO_1) &&
7792 !tg3_flag(tp, HW_TSO_2) &&
7793 !tg3_flag(tp, HW_TSO_3))
7794 tmp_mss = 0;
7795
7796 /* Now loop through additional data
7797 * fragments, and queue them.
7798 */
7799 last = skb_shinfo(skb)->nr_frags - 1;
7800 for (i = 0; i <= last; i++) {
7801 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7802
7803 len = skb_frag_size(frag);
7804 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7805 len, DMA_TO_DEVICE);
7806
7807 tnapi->tx_buffers[entry].skb = NULL;
7808 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7809 mapping);
7810 if (dma_mapping_error(&tp->pdev->dev, mapping))
7811 goto dma_error;
7812
7813 if (!budget ||
7814 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7815 len, base_flags |
7816 ((i == last) ? TXD_FLAG_END : 0),
7817 tmp_mss, vlan)) {
7818 would_hit_hwbug = 1;
7819 break;
7820 }
7821 }
7822 }
7823
7824 if (would_hit_hwbug) {
7825 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7826
7827 /* If the workaround fails due to memory/mapping
7828 * failure, silently drop this packet.
7829 */
7830 entry = tnapi->tx_prod;
7831 budget = tg3_tx_avail(tnapi);
7832 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7833 base_flags, mss, vlan))
7834 goto drop_nofree;
7835 }
7836
7837 skb_tx_timestamp(skb);
7838 netdev_tx_sent_queue(txq, skb->len);
7839
7840 /* Sync BD data before updating mailbox */
7841 wmb();
7842
7843 /* Packets are ready, update Tx producer idx local and on card. */
7844 tw32_tx_mbox(tnapi->prodmbox, entry);
7845
7846 tnapi->tx_prod = entry;
7847 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7848 netif_tx_stop_queue(txq);
7849
7850 /* netif_tx_stop_queue() must be done before checking
7851 * tx index in tg3_tx_avail() below, because in
7852 * tg3_tx(), we update tx index before checking for
7853 * netif_tx_queue_stopped().
7854 */
7855 smp_mb();
7856 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7857 netif_tx_wake_queue(txq);
7858 }
7859
7860 mmiowb();
7861 return NETDEV_TX_OK;
7862
7863 dma_error:
7864 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7865 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7866 drop:
7867 dev_kfree_skb(skb);
7868 drop_nofree:
7869 tp->tx_dropped++;
7870 return NETDEV_TX_OK;
7871 }
7872
7873 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7874 {
7875 if (enable) {
7876 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7877 MAC_MODE_PORT_MODE_MASK);
7878
7879 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7880
7881 if (!tg3_flag(tp, 5705_PLUS))
7882 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7883
7884 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7885 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7886 else
7887 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7888 } else {
7889 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7890
7891 if (tg3_flag(tp, 5705_PLUS) ||
7892 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7893 tg3_asic_rev(tp) == ASIC_REV_5700)
7894 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7895 }
7896
7897 tw32(MAC_MODE, tp->mac_mode);
7898 udelay(40);
7899 }
7900
7901 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7902 {
7903 u32 val, bmcr, mac_mode, ptest = 0;
7904
7905 tg3_phy_toggle_apd(tp, false);
7906 tg3_phy_toggle_automdix(tp, false);
7907
7908 if (extlpbk && tg3_phy_set_extloopbk(tp))
7909 return -EIO;
7910
7911 bmcr = BMCR_FULLDPLX;
7912 switch (speed) {
7913 case SPEED_10:
7914 break;
7915 case SPEED_100:
7916 bmcr |= BMCR_SPEED100;
7917 break;
7918 case SPEED_1000:
7919 default:
7920 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7921 speed = SPEED_100;
7922 bmcr |= BMCR_SPEED100;
7923 } else {
7924 speed = SPEED_1000;
7925 bmcr |= BMCR_SPEED1000;
7926 }
7927 }
7928
7929 if (extlpbk) {
7930 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7931 tg3_readphy(tp, MII_CTRL1000, &val);
7932 val |= CTL1000_AS_MASTER |
7933 CTL1000_ENABLE_MASTER;
7934 tg3_writephy(tp, MII_CTRL1000, val);
7935 } else {
7936 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7937 MII_TG3_FET_PTEST_TRIM_2;
7938 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7939 }
7940 } else
7941 bmcr |= BMCR_LOOPBACK;
7942
7943 tg3_writephy(tp, MII_BMCR, bmcr);
7944
7945 /* The write needs to be flushed for the FETs */
7946 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7947 tg3_readphy(tp, MII_BMCR, &bmcr);
7948
7949 udelay(40);
7950
7951 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7952 tg3_asic_rev(tp) == ASIC_REV_5785) {
7953 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7954 MII_TG3_FET_PTEST_FRC_TX_LINK |
7955 MII_TG3_FET_PTEST_FRC_TX_LOCK);
7956
7957 /* The write needs to be flushed for the AC131 */
7958 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7959 }
7960
7961 /* Reset to prevent losing 1st rx packet intermittently */
7962 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7963 tg3_flag(tp, 5780_CLASS)) {
7964 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7965 udelay(10);
7966 tw32_f(MAC_RX_MODE, tp->rx_mode);
7967 }
7968
7969 mac_mode = tp->mac_mode &
7970 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7971 if (speed == SPEED_1000)
7972 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7973 else
7974 mac_mode |= MAC_MODE_PORT_MODE_MII;
7975
7976 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
7977 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7978
7979 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7980 mac_mode &= ~MAC_MODE_LINK_POLARITY;
7981 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7982 mac_mode |= MAC_MODE_LINK_POLARITY;
7983
7984 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7985 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7986 }
7987
7988 tw32(MAC_MODE, mac_mode);
7989 udelay(40);
7990
7991 return 0;
7992 }
7993
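/* Toggle internal MAC loopback in response to a change of the
 * NETIF_F_LOOPBACK feature bit on a running interface.
 */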
7994 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7995 {
7996 struct tg3 *tp = netdev_priv(dev);
7997
7998 if (features & NETIF_F_LOOPBACK) {
7999 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8000 return;
8001
8002 spin_lock_bh(&tp->lock);
8003 tg3_mac_loopback(tp, true);
8004 netif_carrier_on(tp->dev);
8005 spin_unlock_bh(&tp->lock);
8006 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8007 } else {
8008 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8009 return;
8010
8011 spin_lock_bh(&tp->lock);
8012 tg3_mac_loopback(tp, false);
8013 /* Force link status check */
8014 tg3_setup_phy(tp, true);
8015 spin_unlock_bh(&tp->lock);
8016 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8017 }
8018 }
8019
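/* 5780-class devices cannot do TSO on jumbo frames, so mask off all
 * TSO feature bits whenever the MTU exceeds the standard payload size.
 */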
8020 static netdev_features_t tg3_fix_features(struct net_device *dev,
8021 netdev_features_t features)
8022 {
8023 struct tg3 *tp = netdev_priv(dev);
8024
8025 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8026 features &= ~NETIF_F_ALL_TSO;
8027
8028 return features;
8029 }
8030
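/* Only a NETIF_F_LOOPBACK change needs immediate action here, and
 * only while the device is running.
 */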
8031 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8032 {
8033 netdev_features_t changed = dev->features ^ features;
8034
8035 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8036 tg3_set_loopback(dev, features);
8037
8038 return 0;
8039 }
8040
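/* Release the rx data buffers of a producer ring set. Per-vector
 * shadow rings only free the span between the consumer and producer
 * indexes; the real hardware ring frees every slot.
 */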
8041 static void tg3_rx_prodring_free(struct tg3 *tp,
8042 struct tg3_rx_prodring_set *tpr)
8043 {
8044 int i;
8045
8046 if (tpr != &tp->napi[0].prodring) {
8047 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8048 i = (i + 1) & tp->rx_std_ring_mask)
8049 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8050 tp->rx_pkt_map_sz);
8051
8052 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8053 for (i = tpr->rx_jmb_cons_idx;
8054 i != tpr->rx_jmb_prod_idx;
8055 i = (i + 1) & tp->rx_jmb_ring_mask) {
8056 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8057 TG3_RX_JMB_MAP_SZ);
8058 }
8059 }
8060
8061 return;
8062 }
8063
8064 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8065 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8066 tp->rx_pkt_map_sz);
8067
8068 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8069 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8070 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8071 TG3_RX_JMB_MAP_SZ);
8072 }
8073 }
8074
8075 /* Initialize rx rings for packet processing.
8076 *
8077 * The chip has been shut down and the driver detached from
8078 * the networking stack, so no interrupts or new tx packets will
8079 * end up in the driver. tp->{tx,}lock are held and thus
8080 * we may not sleep.
8081 */
8082 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8083 struct tg3_rx_prodring_set *tpr)
8084 {
8085 u32 i, rx_pkt_dma_sz;
8086
8087 tpr->rx_std_cons_idx = 0;
8088 tpr->rx_std_prod_idx = 0;
8089 tpr->rx_jmb_cons_idx = 0;
8090 tpr->rx_jmb_prod_idx = 0;
8091
8092 if (tpr != &tp->napi[0].prodring) {
8093 memset(&tpr->rx_std_buffers[0], 0,
8094 TG3_RX_STD_BUFF_RING_SIZE(tp));
8095 if (tpr->rx_jmb_buffers)
8096 memset(&tpr->rx_jmb_buffers[0], 0,
8097 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8098 goto done;
8099 }
8100
8101 /* Zero out all descriptors. */
8102 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8103
8104 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8105 if (tg3_flag(tp, 5780_CLASS) &&
8106 tp->dev->mtu > ETH_DATA_LEN)
8107 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8108 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8109
8110 /* Initialize invariants of the rings, we only set this
8111 * stuff once. This works because the card does not
8112 * write into the rx buffer posting rings.
8113 */
8114 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8115 struct tg3_rx_buffer_desc *rxd;
8116
8117 rxd = &tpr->rx_std[i];
8118 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8119 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8120 rxd->opaque = (RXD_OPAQUE_RING_STD |
8121 (i << RXD_OPAQUE_INDEX_SHIFT));
8122 }
8123
8124 /* Now allocate fresh SKBs for each rx ring. */
8125 for (i = 0; i < tp->rx_pending; i++) {
8126 unsigned int frag_size;
8127
8128 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8129 &frag_size) < 0) {
8130 netdev_warn(tp->dev,
8131 "Using a smaller RX standard ring. Only "
8132 "%d out of %d buffers were allocated "
8133 "successfully\n", i, tp->rx_pending);
8134 if (i == 0)
8135 goto initfail;
8136 tp->rx_pending = i;
8137 break;
8138 }
8139 }
8140
8141 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8142 goto done;
8143
8144 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8145
8146 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8147 goto done;
8148
8149 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8150 struct tg3_rx_buffer_desc *rxd;
8151
8152 rxd = &tpr->rx_jmb[i].std;
8153 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8154 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8155 RXD_FLAG_JUMBO;
8156 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8157 (i << RXD_OPAQUE_INDEX_SHIFT));
8158 }
8159
8160 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8161 unsigned int frag_size;
8162
8163 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8164 &frag_size) < 0) {
8165 netdev_warn(tp->dev,
8166 "Using a smaller RX jumbo ring. Only %d "
8167 "out of %d buffers were allocated "
8168 "successfully\n", i, tp->rx_jumbo_pending);
8169 if (i == 0)
8170 goto initfail;
8171 tp->rx_jumbo_pending = i;
8172 break;
8173 }
8174 }
8175
8176 done:
8177 return 0;
8178
8179 initfail:
8180 tg3_rx_prodring_free(tp, tpr);
8181 return -ENOMEM;
8182 }
8183
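/* Free the shadow buffer arrays and coherent descriptor rings of a
 * producer ring set. Safe on a partially initialized set.
 */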
8184 static void tg3_rx_prodring_fini(struct tg3 *tp,
8185 struct tg3_rx_prodring_set *tpr)
8186 {
8187 kfree(tpr->rx_std_buffers);
8188 tpr->rx_std_buffers = NULL;
8189 kfree(tpr->rx_jmb_buffers);
8190 tpr->rx_jmb_buffers = NULL;
8191 if (tpr->rx_std) {
8192 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8193 tpr->rx_std, tpr->rx_std_mapping);
8194 tpr->rx_std = NULL;
8195 }
8196 if (tpr->rx_jmb) {
8197 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8198 tpr->rx_jmb, tpr->rx_jmb_mapping);
8199 tpr->rx_jmb = NULL;
8200 }
8201 }
8202
8203 static int tg3_rx_prodring_init(struct tg3 *tp,
8204 struct tg3_rx_prodring_set *tpr)
8205 {
8206 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8207 GFP_KERNEL);
8208 if (!tpr->rx_std_buffers)
8209 return -ENOMEM;
8210
8211 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8212 TG3_RX_STD_RING_BYTES(tp),
8213 &tpr->rx_std_mapping,
8214 GFP_KERNEL);
8215 if (!tpr->rx_std)
8216 goto err_out;
8217
8218 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8219 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8220 GFP_KERNEL);
8221 if (!tpr->rx_jmb_buffers)
8222 goto err_out;
8223
8224 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8225 TG3_RX_JMB_RING_BYTES(tp),
8226 &tpr->rx_jmb_mapping,
8227 GFP_KERNEL);
8228 if (!tpr->rx_jmb)
8229 goto err_out;
8230 }
8231
8232 return 0;
8233
8234 err_out:
8235 tg3_rx_prodring_fini(tp, tpr);
8236 return -ENOMEM;
8237 }
8238
8239 /* Free up pending packets in all rx/tx rings.
8240 *
8241 * The chip has been shut down and the driver detached from
8242 * the networking stack, so no interrupts or new tx packets will
8243 * end up in the driver. tp->{tx,}lock is not held and we are not
8244 * in an interrupt context and thus may sleep.
8245 */
8246 static void tg3_free_rings(struct tg3 *tp)
8247 {
8248 int i, j;
8249
8250 for (j = 0; j < tp->irq_cnt; j++) {
8251 struct tg3_napi *tnapi = &tp->napi[j];
8252
8253 tg3_rx_prodring_free(tp, &tnapi->prodring);
8254
8255 if (!tnapi->tx_buffers)
8256 continue;
8257
8258 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8259 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8260
8261 if (!skb)
8262 continue;
8263
8264 tg3_tx_skb_unmap(tnapi, i,
8265 skb_shinfo(skb)->nr_frags - 1);
8266
8267 dev_kfree_skb_any(skb);
8268 }
8269 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8270 }
8271 }
8272
8273 /* Initialize tx/rx rings for packet processing.
8274 *
8275 * The chip has been shut down and the driver detached from
8276 * the networking stack, so no interrupts or new tx packets will
8277 * end up in the driver. tp->{tx,}lock are held and thus
8278 * we may not sleep.
8279 */
8280 static int tg3_init_rings(struct tg3 *tp)
8281 {
8282 int i;
8283
8284 /* Free up all the SKBs. */
8285 tg3_free_rings(tp);
8286
8287 for (i = 0; i < tp->irq_cnt; i++) {
8288 struct tg3_napi *tnapi = &tp->napi[i];
8289
8290 tnapi->last_tag = 0;
8291 tnapi->last_irq_tag = 0;
8292 tnapi->hw_status->status = 0;
8293 tnapi->hw_status->status_tag = 0;
8294 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8295
8296 tnapi->tx_prod = 0;
8297 tnapi->tx_cons = 0;
8298 if (tnapi->tx_ring)
8299 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8300
8301 tnapi->rx_rcb_ptr = 0;
8302 if (tnapi->rx_rcb)
8303 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8304
8305 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8306 tg3_free_rings(tp);
8307 return -ENOMEM;
8308 }
8309 }
8310
8311 return 0;
8312 }
8313
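/* Free every tx descriptor ring along with its shadow buffer array. */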
8314 static void tg3_mem_tx_release(struct tg3 *tp)
8315 {
8316 int i;
8317
8318 for (i = 0; i < tp->irq_max; i++) {
8319 struct tg3_napi *tnapi = &tp->napi[i];
8320
8321 if (tnapi->tx_ring) {
8322 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8323 tnapi->tx_ring, tnapi->tx_desc_mapping);
8324 tnapi->tx_ring = NULL;
8325 }
8326
8327 kfree(tnapi->tx_buffers);
8328 tnapi->tx_buffers = NULL;
8329 }
8330 }
8331
8332 static int tg3_mem_tx_acquire(struct tg3 *tp)
8333 {
8334 int i;
8335 struct tg3_napi *tnapi = &tp->napi[0];
8336
8337 /* If multivector TSS is enabled, vector 0 does not handle
8338 * tx interrupts. Don't allocate any resources for it.
8339 */
8340 if (tg3_flag(tp, ENABLE_TSS))
8341 tnapi++;
8342
8343 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8344 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8345 TG3_TX_RING_SIZE, GFP_KERNEL);
8346 if (!tnapi->tx_buffers)
8347 goto err_out;
8348
8349 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8350 TG3_TX_RING_BYTES,
8351 &tnapi->tx_desc_mapping,
8352 GFP_KERNEL);
8353 if (!tnapi->tx_ring)
8354 goto err_out;
8355 }
8356
8357 return 0;
8358
8359 err_out:
8360 tg3_mem_tx_release(tp);
8361 return -ENOMEM;
8362 }
8363
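/* Free the per-vector rx producer ring sets and rx return (RCB)
 * rings.
 */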
8364 static void tg3_mem_rx_release(struct tg3 *tp)
8365 {
8366 int i;
8367
8368 for (i = 0; i < tp->irq_max; i++) {
8369 struct tg3_napi *tnapi = &tp->napi[i];
8370
8371 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8372
8373 if (!tnapi->rx_rcb)
8374 continue;
8375
8376 dma_free_coherent(&tp->pdev->dev,
8377 TG3_RX_RCB_RING_BYTES(tp),
8378 tnapi->rx_rcb,
8379 tnapi->rx_rcb_mapping);
8380 tnapi->rx_rcb = NULL;
8381 }
8382 }
8383
8384 static int tg3_mem_rx_acquire(struct tg3 *tp)
8385 {
8386 unsigned int i, limit;
8387
8388 limit = tp->rxq_cnt;
8389
8390 /* If RSS is enabled, we need a (dummy) producer ring
8391 * set on vector zero. This is the true hw prodring.
8392 */
8393 if (tg3_flag(tp, ENABLE_RSS))
8394 limit++;
8395
8396 for (i = 0; i < limit; i++) {
8397 struct tg3_napi *tnapi = &tp->napi[i];
8398
8399 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8400 goto err_out;
8401
8402 /* If multivector RSS is enabled, vector 0
8403 * does not handle rx or tx interrupts.
8404 * Don't allocate any resources for it.
8405 */
8406 if (!i && tg3_flag(tp, ENABLE_RSS))
8407 continue;
8408
8409 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8410 TG3_RX_RCB_RING_BYTES(tp),
8411 &tnapi->rx_rcb_mapping,
8412 GFP_KERNEL | __GFP_ZERO);
8413 if (!tnapi->rx_rcb)
8414 goto err_out;
8415 }
8416
8417 return 0;
8418
8419 err_out:
8420 tg3_mem_rx_release(tp);
8421 return -ENOMEM;
8422 }
8423
8424 /*
8425 * Must not be invoked with interrupt sources disabled and
8426 * the hardware shut down.
8427 */
8428 static void tg3_free_consistent(struct tg3 *tp)
8429 {
8430 int i;
8431
8432 for (i = 0; i < tp->irq_cnt; i++) {
8433 struct tg3_napi *tnapi = &tp->napi[i];
8434
8435 if (tnapi->hw_status) {
8436 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8437 tnapi->hw_status,
8438 tnapi->status_mapping);
8439 tnapi->hw_status = NULL;
8440 }
8441 }
8442
8443 tg3_mem_rx_release(tp);
8444 tg3_mem_tx_release(tp);
8445
8446 if (tp->hw_stats) {
8447 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8448 tp->hw_stats, tp->stats_mapping);
8449 tp->hw_stats = NULL;
8450 }
8451 }
8452
8453 /*
8454 * Must not be invoked with interrupt sources disabled and
8455 * the hardware shut down. Can sleep.
8456 */
8457 static int tg3_alloc_consistent(struct tg3 *tp)
8458 {
8459 int i;
8460
8461 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8462 sizeof(struct tg3_hw_stats),
8463 &tp->stats_mapping,
8464 GFP_KERNEL | __GFP_ZERO);
8465 if (!tp->hw_stats)
8466 goto err_out;
8467
8468 for (i = 0; i < tp->irq_cnt; i++) {
8469 struct tg3_napi *tnapi = &tp->napi[i];
8470 struct tg3_hw_status *sblk;
8471
8472 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8473 TG3_HW_STATUS_SIZE,
8474 &tnapi->status_mapping,
8475 GFP_KERNEL | __GFP_ZERO);
8476 if (!tnapi->hw_status)
8477 goto err_out;
8478
8479 sblk = tnapi->hw_status;
8480
8481 if (tg3_flag(tp, ENABLE_RSS)) {
8482 u16 *prodptr = NULL;
8483
8484 /*
8485 * When RSS is enabled, the status block format changes
8486 * slightly. The "rx_jumbo_consumer", "reserved",
8487 * and "rx_mini_consumer" members get mapped to the
8488 * other three rx return ring producer indexes.
8489 */
8490 switch (i) {
8491 case 1:
8492 prodptr = &sblk->idx[0].rx_producer;
8493 break;
8494 case 2:
8495 prodptr = &sblk->rx_jumbo_consumer;
8496 break;
8497 case 3:
8498 prodptr = &sblk->reserved;
8499 break;
8500 case 4:
8501 prodptr = &sblk->rx_mini_consumer;
8502 break;
8503 }
8504 tnapi->rx_rcb_prod_idx = prodptr;
8505 } else {
8506 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8507 }
8508 }
8509
8510 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8511 goto err_out;
8512
8513 return 0;
8514
8515 err_out:
8516 tg3_free_consistent(tp);
8517 return -ENOMEM;
8518 }
8519
8520 #define MAX_WAIT_CNT 1000
8521
8522 /* To stop a block, clear the enable bit and poll till it
8523 * clears. tp->lock is held.
8524 */
8525 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8526 {
8527 unsigned int i;
8528 u32 val;
8529
8530 if (tg3_flag(tp, 5705_PLUS)) {
8531 switch (ofs) {
8532 case RCVLSC_MODE:
8533 case DMAC_MODE:
8534 case MBFREE_MODE:
8535 case BUFMGR_MODE:
8536 case MEMARB_MODE:
8537 /* We can't enable/disable these bits of the
8538 * 5705/5750, just say success.
8539 */
8540 return 0;
8541
8542 default:
8543 break;
8544 }
8545 }
8546
8547 val = tr32(ofs);
8548 val &= ~enable_bit;
8549 tw32_f(ofs, val);
8550
8551 for (i = 0; i < MAX_WAIT_CNT; i++) {
8552 udelay(100);
8553 val = tr32(ofs);
8554 if ((val & enable_bit) == 0)
8555 break;
8556 }
8557
8558 if (i == MAX_WAIT_CNT && !silent) {
8559 dev_err(&tp->pdev->dev,
8560 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8561 ofs, enable_bit);
8562 return -ENODEV;
8563 }
8564
8565 return 0;
8566 }
8567
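/* Quiesce the hardware: disable interrupts and the rx/tx MACs, then
 * stop each DMA and buffer-manager block in turn, optionally without
 * reporting blocks that fail to stop.
 */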
8568 /* tp->lock is held. */
8569 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8570 {
8571 int i, err;
8572
8573 tg3_disable_ints(tp);
8574
8575 tp->rx_mode &= ~RX_MODE_ENABLE;
8576 tw32_f(MAC_RX_MODE, tp->rx_mode);
8577 udelay(10);
8578
8579 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8580 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8581 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8582 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8583 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8584 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8585
8586 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8587 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8588 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8589 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8590 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8591 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8592 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8593
8594 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8595 tw32_f(MAC_MODE, tp->mac_mode);
8596 udelay(40);
8597
8598 tp->tx_mode &= ~TX_MODE_ENABLE;
8599 tw32_f(MAC_TX_MODE, tp->tx_mode);
8600
8601 for (i = 0; i < MAX_WAIT_CNT; i++) {
8602 udelay(100);
8603 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8604 break;
8605 }
8606 if (i >= MAX_WAIT_CNT) {
8607 dev_err(&tp->pdev->dev,
8608 "%s timed out, TX_MODE_ENABLE will not clear "
8609 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8610 err |= -ENODEV;
8611 }
8612
8613 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8614 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8615 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8616
8617 tw32(FTQ_RESET, 0xffffffff);
8618 tw32(FTQ_RESET, 0x00000000);
8619
8620 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8621 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8622
8623 for (i = 0; i < tp->irq_cnt; i++) {
8624 struct tg3_napi *tnapi = &tp->napi[i];
8625 if (tnapi->hw_status)
8626 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8627 }
8628
8629 return err;
8630 }
8631
8632 /* Save PCI command register before chip reset */
8633 static void tg3_save_pci_state(struct tg3 *tp)
8634 {
8635 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8636 }
8637
8638 /* Restore PCI state after chip reset */
8639 static void tg3_restore_pci_state(struct tg3 *tp)
8640 {
8641 u32 val;
8642
8643 /* Re-enable indirect register accesses. */
8644 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8645 tp->misc_host_ctrl);
8646
8647 /* Set MAX PCI retry to zero. */
8648 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8649 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8650 tg3_flag(tp, PCIX_MODE))
8651 val |= PCISTATE_RETRY_SAME_DMA;
8652 /* Allow reads and writes to the APE register and memory space. */
8653 if (tg3_flag(tp, ENABLE_APE))
8654 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8655 PCISTATE_ALLOW_APE_SHMEM_WR |
8656 PCISTATE_ALLOW_APE_PSPACE_WR;
8657 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8658
8659 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8660
8661 if (!tg3_flag(tp, PCI_EXPRESS)) {
8662 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8663 tp->pci_cacheline_sz);
8664 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8665 tp->pci_lat_timer);
8666 }
8667
8668 /* Make sure PCI-X relaxed ordering bit is clear. */
8669 if (tg3_flag(tp, PCIX_MODE)) {
8670 u16 pcix_cmd;
8671
8672 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8673 &pcix_cmd);
8674 pcix_cmd &= ~PCI_X_CMD_ERO;
8675 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8676 pcix_cmd);
8677 }
8678
8679 if (tg3_flag(tp, 5780_CLASS)) {
8680
8681 /* Chip reset on 5780 will reset MSI enable bit,
8682 * so we need to restore it.
8683 */
8684 if (tg3_flag(tp, USING_MSI)) {
8685 u16 ctrl;
8686
8687 pci_read_config_word(tp->pdev,
8688 tp->msi_cap + PCI_MSI_FLAGS,
8689 &ctrl);
8690 pci_write_config_word(tp->pdev,
8691 tp->msi_cap + PCI_MSI_FLAGS,
8692 ctrl | PCI_MSI_FLAGS_ENABLE);
8693 val = tr32(MSGINT_MODE);
8694 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8695 }
8696 }
8697 }
8698
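/* Perform a GRC core-clock chip reset. PCI state is saved and
 * restored around the reset, and config-space reads are used to
 * flush posted writes while MMIO is unusable.
 */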
8699 /* tp->lock is held. */
8700 static int tg3_chip_reset(struct tg3 *tp)
8701 {
8702 u32 val;
8703 void (*write_op)(struct tg3 *, u32, u32);
8704 int i, err;
8705
8706 tg3_nvram_lock(tp);
8707
8708 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8709
8710 /* No matching tg3_nvram_unlock() after this because
8711 * chip reset below will undo the nvram lock.
8712 */
8713 tp->nvram_lock_cnt = 0;
8714
8715 /* GRC_MISC_CFG core clock reset will clear the memory
8716 * enable bit in PCI register 4 and the MSI enable bit
8717 * on some chips, so we save relevant registers here.
8718 */
8719 tg3_save_pci_state(tp);
8720
8721 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8722 tg3_flag(tp, 5755_PLUS))
8723 tw32(GRC_FASTBOOT_PC, 0);
8724
8725 /*
8726 * We must avoid the readl() that normally takes place.
8727 * It locks up machines, causes machine checks, and other
8728 * fun things. So, temporarily disable the 5701
8729 * hardware workaround while we do the reset.
8730 */
8731 write_op = tp->write32;
8732 if (write_op == tg3_write_flush_reg32)
8733 tp->write32 = tg3_write32;
8734
8735 /* Prevent the irq handler from reading or writing PCI registers
8736 * during chip reset when the memory enable bit in the PCI command
8737 * register may be cleared. The chip does not generate interrupts
8738 * at this time, but the irq handler may still be called due to irq
8739 * sharing or irqpoll.
8740 */
8741 tg3_flag_set(tp, CHIP_RESETTING);
8742 for (i = 0; i < tp->irq_cnt; i++) {
8743 struct tg3_napi *tnapi = &tp->napi[i];
8744 if (tnapi->hw_status) {
8745 tnapi->hw_status->status = 0;
8746 tnapi->hw_status->status_tag = 0;
8747 }
8748 tnapi->last_tag = 0;
8749 tnapi->last_irq_tag = 0;
8750 }
8751 smp_mb();
8752
8753 for (i = 0; i < tp->irq_cnt; i++)
8754 synchronize_irq(tp->napi[i].irq_vec);
8755
8756 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
8757 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8758 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8759 }
8760
8761 /* do the reset */
8762 val = GRC_MISC_CFG_CORECLK_RESET;
8763
8764 if (tg3_flag(tp, PCI_EXPRESS)) {
8765 /* Force PCIe 1.0a mode */
8766 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
8767 !tg3_flag(tp, 57765_PLUS) &&
8768 tr32(TG3_PCIE_PHY_TSTCTL) ==
8769 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8770 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8771
8772 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
8773 tw32(GRC_MISC_CFG, (1 << 29));
8774 val |= (1 << 29);
8775 }
8776 }
8777
8778 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
8779 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8780 tw32(GRC_VCPU_EXT_CTRL,
8781 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8782 }
8783
8784 /* Manage gphy power for all CPMU-absent PCIe devices. */
8785 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8786 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8787
8788 tw32(GRC_MISC_CFG, val);
8789
8790 /* restore 5701 hardware bug workaround write method */
8791 tp->write32 = write_op;
8792
8793 /* Unfortunately, we have to delay before the PCI read back.
8794 * Some 575X chips will not even respond to a PCI cfg access
8795 * when the reset command is given to the chip.
8796 *
8797 * How do these hardware designers expect things to work
8798 * properly if the PCI write is posted for a long period
8799 * of time? It is always necessary to have some method by
8800 * which a register read back can occur to push out the write
8801 * that does the reset.
8802 *
8803 * For most tg3 variants the trick below was working.
8804 * Ho hum...
8805 */
8806 udelay(120);
8807
8808 /* Flush PCI posted writes. The normal MMIO registers
8809 * are inaccessible at this time so this is the only
8810 * way to do this reliably (actually, this is no longer
8811 * the case, see above). I tried to use indirect
8812 * register read/write but this upset some 5701 variants.
8813 */
8814 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8815
8816 udelay(120);
8817
8818 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8819 u16 val16;
8820
8821 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
8822 int j;
8823 u32 cfg_val;
8824
8825 /* Wait for link training to complete. */
8826 for (j = 0; j < 5000; j++)
8827 udelay(100);
8828
8829 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8830 pci_write_config_dword(tp->pdev, 0xc4,
8831 cfg_val | (1 << 15));
8832 }
8833
8834 /* Clear the "no snoop" and "relaxed ordering" bits. */
8835 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8836 /*
8837 * Older PCIe devices only support the 128 byte
8838 * MPS setting. Enforce the restriction.
8839 */
8840 if (!tg3_flag(tp, CPMU_PRESENT))
8841 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8842 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8843
8844 /* Clear error status */
8845 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8846 PCI_EXP_DEVSTA_CED |
8847 PCI_EXP_DEVSTA_NFED |
8848 PCI_EXP_DEVSTA_FED |
8849 PCI_EXP_DEVSTA_URD);
8850 }
8851
8852 tg3_restore_pci_state(tp);
8853
8854 tg3_flag_clear(tp, CHIP_RESETTING);
8855 tg3_flag_clear(tp, ERROR_PROCESSED);
8856
8857 val = 0;
8858 if (tg3_flag(tp, 5780_CLASS))
8859 val = tr32(MEMARB_MODE);
8860 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8861
8862 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
8863 tg3_stop_fw(tp);
8864 tw32(0x5000, 0x400);
8865 }
8866
8867 if (tg3_flag(tp, IS_SSB_CORE)) {
8868 /*
8869 * BCM4785: In order to avoid repercussions from using
8870 * potentially defective internal ROM, stop the Rx RISC CPU,
8871 * which is not required for normal operation.
8872 */
8873 tg3_stop_fw(tp);
8874 tg3_halt_cpu(tp, RX_CPU_BASE);
8875 }
8876
8877 tw32(GRC_MODE, tp->grc_mode);
8878
8879 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
8880 val = tr32(0xc4);
8881
8882 tw32(0xc4, val | (1 << 15));
8883 }
8884
8885 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8886 tg3_asic_rev(tp) == ASIC_REV_5705) {
8887 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8888 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
8889 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8890 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8891 }
8892
8893 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8894 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8895 val = tp->mac_mode;
8896 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8897 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8898 val = tp->mac_mode;
8899 } else
8900 val = 0;
8901
8902 tw32_f(MAC_MODE, val);
8903 udelay(40);
8904
8905 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8906
8907 err = tg3_poll_fw(tp);
8908 if (err)
8909 return err;
8910
8911 tg3_mdio_start(tp);
8912
8913 if (tg3_flag(tp, PCI_EXPRESS) &&
8914 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
8915 tg3_asic_rev(tp) != ASIC_REV_5785 &&
8916 !tg3_flag(tp, 57765_PLUS)) {
8917 val = tr32(0x7c00);
8918
8919 tw32(0x7c00, val | (1 << 25));
8920 }
8921
8922 if (tg3_asic_rev(tp) == ASIC_REV_5720) {
8923 val = tr32(TG3_CPMU_CLCK_ORIDE);
8924 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8925 }
8926
8927 /* Reprobe ASF enable state. */
8928 tg3_flag_clear(tp, ENABLE_ASF);
8929 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
8930 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
8931
8932 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8933 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8934 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8935 u32 nic_cfg;
8936
8937 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8938 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8939 tg3_flag_set(tp, ENABLE_ASF);
8940 tp->last_event_jiffies = jiffies;
8941 if (tg3_flag(tp, 5750_PLUS))
8942 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8943
8944 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
8945 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
8946 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
8947 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
8948 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
8949 }
8950 }
8951
8952 return 0;
8953 }
8954
8955 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8956 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8957
8958 /* tp->lock is held. */
8959 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
8960 {
8961 int err;
8962
8963 tg3_stop_fw(tp);
8964
8965 tg3_write_sig_pre_reset(tp, kind);
8966
8967 tg3_abort_hw(tp, silent);
8968 err = tg3_chip_reset(tp);
8969
8970 __tg3_set_mac_addr(tp, false);
8971
8972 tg3_write_sig_legacy(tp, kind);
8973 tg3_write_sig_post_reset(tp, kind);
8974
8975 if (tp->hw_stats) {
8976 /* Save the stats across chip resets... */
8977 tg3_get_nstats(tp, &tp->net_stats_prev);
8978 tg3_get_estats(tp, &tp->estats_prev);
8979
8980 /* And make sure the next sample is new data */
8981 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8982 }
8983
8984 if (err)
8985 return err;
8986
8987 return 0;
8988 }
8989
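/* ndo_set_mac_address handler. Programs the new address into the
 * MAC, leaving the MAC_ADDR_1 slot untouched if ASF firmware has
 * claimed it for its own use.
 */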
8990 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8991 {
8992 struct tg3 *tp = netdev_priv(dev);
8993 struct sockaddr *addr = p;
8994 int err = 0;
8995 bool skip_mac_1 = false;
8996
8997 if (!is_valid_ether_addr(addr->sa_data))
8998 return -EADDRNOTAVAIL;
8999
9000 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9001
9002 if (!netif_running(dev))
9003 return 0;
9004
9005 if (tg3_flag(tp, ENABLE_ASF)) {
9006 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9007
9008 addr0_high = tr32(MAC_ADDR_0_HIGH);
9009 addr0_low = tr32(MAC_ADDR_0_LOW);
9010 addr1_high = tr32(MAC_ADDR_1_HIGH);
9011 addr1_low = tr32(MAC_ADDR_1_LOW);
9012
9013 /* Skip MAC addr 1 if ASF is using it. */
9014 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9015 !(addr1_high == 0 && addr1_low == 0))
9016 skip_mac_1 = true;
9017 }
9018 spin_lock_bh(&tp->lock);
9019 __tg3_set_mac_addr(tp, skip_mac_1);
9020 spin_unlock_bh(&tp->lock);
9021
9022 return err;
9023 }
9024
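/* Write one TG3_BDINFO block describing a ring: host DMA address,
 * length/flags word and, on pre-5705 chips, the NIC SRAM address.
 */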
9025 /* tp->lock is held. */
9026 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9027 dma_addr_t mapping, u32 maxlen_flags,
9028 u32 nic_addr)
9029 {
9030 tg3_write_mem(tp,
9031 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9032 ((u64) mapping >> 32));
9033 tg3_write_mem(tp,
9034 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9035 ((u64) mapping & 0xffffffff));
9036 tg3_write_mem(tp,
9037 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9038 maxlen_flags);
9039
9040 if (!tg3_flag(tp, 5705_PLUS))
9041 tg3_write_mem(tp,
9042 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9043 nic_addr);
9044 }
9045
9046
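/* Program tx interrupt coalescing. Without TSS the global registers
 * are used; with TSS they are zeroed and each tx vector gets its own
 * register block at a stride of 0x18. Leftover vectors are zeroed.
 */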
9047 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9048 {
9049 int i = 0;
9050
9051 if (!tg3_flag(tp, ENABLE_TSS)) {
9052 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9053 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9054 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9055 } else {
9056 tw32(HOSTCC_TXCOL_TICKS, 0);
9057 tw32(HOSTCC_TXMAX_FRAMES, 0);
9058 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9059
9060 for (; i < tp->txq_cnt; i++) {
9061 u32 reg;
9062
9063 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9064 tw32(reg, ec->tx_coalesce_usecs);
9065 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9066 tw32(reg, ec->tx_max_coalesced_frames);
9067 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9068 tw32(reg, ec->tx_max_coalesced_frames_irq);
9069 }
9070 }
9071
9072 for (; i < tp->irq_max - 1; i++) {
9073 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9074 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9075 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9076 }
9077 }
9078
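/* Program rx interrupt coalescing, mirroring the tx path: global
 * registers without RSS, per-vector register blocks with RSS.
 */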
9079 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9080 {
9081 int i = 0;
9082 u32 limit = tp->rxq_cnt;
9083
9084 if (!tg3_flag(tp, ENABLE_RSS)) {
9085 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9086 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9087 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9088 limit--;
9089 } else {
9090 tw32(HOSTCC_RXCOL_TICKS, 0);
9091 tw32(HOSTCC_RXMAX_FRAMES, 0);
9092 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9093 }
9094
9095 for (; i < limit; i++) {
9096 u32 reg;
9097
9098 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9099 tw32(reg, ec->rx_coalesce_usecs);
9100 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9101 tw32(reg, ec->rx_max_coalesced_frames);
9102 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9103 tw32(reg, ec->rx_max_coalesced_frames_irq);
9104 }
9105
9106 for (; i < tp->irq_max - 1; i++) {
9107 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9108 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9109 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9110 }
9111 }
9112
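/* Apply an ethtool_coalesce configuration to the chip. The
 * statistics block ticks are forced to zero while the link is down.
 */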
9113 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9114 {
9115 tg3_coal_tx_init(tp, ec);
9116 tg3_coal_rx_init(tp, ec);
9117
9118 if (!tg3_flag(tp, 5705_PLUS)) {
9119 u32 val = ec->stats_block_coalesce_usecs;
9120
9121 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9122 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9123
9124 if (!tp->link_up)
9125 val = 0;
9126
9127 tw32(HOSTCC_STAT_COAL_TICKS, val);
9128 }
9129 }
9130
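/* Disable all but the first send and receive return rings, zero the
 * mailboxes and per-vector state, and reinstall the status block and
 * BD ring addresses.
 */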
9131 /* tp->lock is held. */
9132 static void tg3_rings_reset(struct tg3 *tp)
9133 {
9134 int i;
9135 u32 stblk, txrcb, rxrcb, limit;
9136 struct tg3_napi *tnapi = &tp->napi[0];
9137
9138 /* Disable all transmit rings but the first. */
9139 if (!tg3_flag(tp, 5705_PLUS))
9140 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9141 else if (tg3_flag(tp, 5717_PLUS))
9142 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9143 else if (tg3_flag(tp, 57765_CLASS) ||
9144 tg3_asic_rev(tp) == ASIC_REV_5762)
9145 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9146 else
9147 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9148
9149 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9150 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9151 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9152 BDINFO_FLAGS_DISABLED);
9153
9154
9155 /* Disable all receive return rings but the first. */
9156 if (tg3_flag(tp, 5717_PLUS))
9157 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9158 else if (!tg3_flag(tp, 5705_PLUS))
9159 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9160 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9161 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9162 tg3_flag(tp, 57765_CLASS))
9163 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9164 else
9165 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9166
9167 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9168 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9169 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9170 BDINFO_FLAGS_DISABLED);
9171
9172 /* Disable interrupts */
9173 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9174 tp->napi[0].chk_msi_cnt = 0;
9175 tp->napi[0].last_rx_cons = 0;
9176 tp->napi[0].last_tx_cons = 0;
9177
9178 /* Zero mailbox registers. */
9179 if (tg3_flag(tp, SUPPORT_MSIX)) {
9180 for (i = 1; i < tp->irq_max; i++) {
9181 tp->napi[i].tx_prod = 0;
9182 tp->napi[i].tx_cons = 0;
9183 if (tg3_flag(tp, ENABLE_TSS))
9184 tw32_mailbox(tp->napi[i].prodmbox, 0);
9185 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9186 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9187 tp->napi[i].chk_msi_cnt = 0;
9188 tp->napi[i].last_rx_cons = 0;
9189 tp->napi[i].last_tx_cons = 0;
9190 }
9191 if (!tg3_flag(tp, ENABLE_TSS))
9192 tw32_mailbox(tp->napi[0].prodmbox, 0);
9193 } else {
9194 tp->napi[0].tx_prod = 0;
9195 tp->napi[0].tx_cons = 0;
9196 tw32_mailbox(tp->napi[0].prodmbox, 0);
9197 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9198 }
9199
9200 /* Make sure the NIC-based send BD rings are disabled. */
9201 if (!tg3_flag(tp, 5705_PLUS)) {
9202 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9203 for (i = 0; i < 16; i++)
9204 tw32_tx_mbox(mbox + i * 8, 0);
9205 }
9206
9207 txrcb = NIC_SRAM_SEND_RCB;
9208 rxrcb = NIC_SRAM_RCV_RET_RCB;
9209
9210 /* Clear status block in ram. */
9211 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9212
9213 /* Set status block DMA address */
9214 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9215 ((u64) tnapi->status_mapping >> 32));
9216 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9217 ((u64) tnapi->status_mapping & 0xffffffff));
9218
9219 if (tnapi->tx_ring) {
9220 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9221 (TG3_TX_RING_SIZE <<
9222 BDINFO_FLAGS_MAXLEN_SHIFT),
9223 NIC_SRAM_TX_BUFFER_DESC);
9224 txrcb += TG3_BDINFO_SIZE;
9225 }
9226
9227 if (tnapi->rx_rcb) {
9228 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9229 (tp->rx_ret_ring_mask + 1) <<
9230 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9231 rxrcb += TG3_BDINFO_SIZE;
9232 }
9233
9234 stblk = HOSTCC_STATBLCK_RING1;
9235
9236 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9237 u64 mapping = (u64)tnapi->status_mapping;
9238 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9239 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9240
9241 /* Clear status block in ram. */
9242 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9243
9244 if (tnapi->tx_ring) {
9245 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9246 (TG3_TX_RING_SIZE <<
9247 BDINFO_FLAGS_MAXLEN_SHIFT),
9248 NIC_SRAM_TX_BUFFER_DESC);
9249 txrcb += TG3_BDINFO_SIZE;
9250 }
9251
9252 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9253 ((tp->rx_ret_ring_mask + 1) <<
9254 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
9255
9256 stblk += 8;
9257 rxrcb += TG3_BDINFO_SIZE;
9258 }
9259 }
9260
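/* Derive the rx BD replenish thresholds from the per-chip BD cache
 * size and the configured ring lengths, then program the standard
 * and jumbo threshold registers.
 */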
9261 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9262 {
9263 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9264
9265 if (!tg3_flag(tp, 5750_PLUS) ||
9266 tg3_flag(tp, 5780_CLASS) ||
9267 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9268 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9269 tg3_flag(tp, 57765_PLUS))
9270 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9271 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9272 tg3_asic_rev(tp) == ASIC_REV_5787)
9273 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9274 else
9275 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9276
9277 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9278 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9279
9280 val = min(nic_rep_thresh, host_rep_thresh);
9281 tw32(RCVBDI_STD_THRESH, val);
9282
9283 if (tg3_flag(tp, 57765_PLUS))
9284 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9285
9286 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9287 return;
9288
9289 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9290
9291 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9292
9293 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9294 tw32(RCVBDI_JUMBO_THRESH, val);
9295
9296 if (tg3_flag(tp, 57765_PLUS))
9297 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9298 }
9299
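/* Bitwise CRC-32 (reflected polynomial 0xedb88320), used to hash
 * multicast addresses into the MAC hash filter registers.
 */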
9300 static inline u32 calc_crc(unsigned char *buf, int len)
9301 {
9302 u32 reg;
9303 u32 tmp;
9304 int j, k;
9305
9306 reg = 0xffffffff;
9307
9308 for (j = 0; j < len; j++) {
9309 reg ^= buf[j];
9310
9311 for (k = 0; k < 8; k++) {
9312 tmp = reg & 0x01;
9313
9314 reg >>= 1;
9315
9316 if (tmp)
9317 reg ^= 0xedb88320;
9318 }
9319 }
9320
9321 return ~reg;
9322 }
9323
9324 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9325 {
9326 /* accept or reject all multicast frames */
9327 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9328 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9329 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9330 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9331 }
9332
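/* Rebuild the rx mode word and multicast hash filter from the
 * device flags and multicast list; MAC_RX_MODE is rewritten only
 * when the computed mode differs from the cached value.
 */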
9333 static void __tg3_set_rx_mode(struct net_device *dev)
9334 {
9335 struct tg3 *tp = netdev_priv(dev);
9336 u32 rx_mode;
9337
9338 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9339 RX_MODE_KEEP_VLAN_TAG);
9340
9341 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9342 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9343 * flag clear.
9344 */
9345 if (!tg3_flag(tp, ENABLE_ASF))
9346 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9347 #endif
9348
9349 if (dev->flags & IFF_PROMISC) {
9350 /* Promiscuous mode. */
9351 rx_mode |= RX_MODE_PROMISC;
9352 } else if (dev->flags & IFF_ALLMULTI) {
9353 /* Accept all multicast. */
9354 tg3_set_multi(tp, 1);
9355 } else if (netdev_mc_empty(dev)) {
9356 /* Reject all multicast. */
9357 tg3_set_multi(tp, 0);
9358 } else {
9359 /* Accept one or more multicast(s). */
9360 struct netdev_hw_addr *ha;
9361 u32 mc_filter[4] = { 0, };
9362 u32 regidx;
9363 u32 bit;
9364 u32 crc;
9365
9366 netdev_for_each_mc_addr(ha, dev) {
9367 crc = calc_crc(ha->addr, ETH_ALEN);
9368 bit = ~crc & 0x7f;
9369 regidx = (bit & 0x60) >> 5;
9370 bit &= 0x1f;
9371 mc_filter[regidx] |= (1 << bit);
9372 }
9373
9374 tw32(MAC_HASH_REG_0, mc_filter[0]);
9375 tw32(MAC_HASH_REG_1, mc_filter[1]);
9376 tw32(MAC_HASH_REG_2, mc_filter[2]);
9377 tw32(MAC_HASH_REG_3, mc_filter[3]);
9378 }
9379
9380 if (rx_mode != tp->rx_mode) {
9381 tp->rx_mode = rx_mode;
9382 tw32_f(MAC_RX_MODE, rx_mode);
9383 udelay(10);
9384 }
9385 }
9386
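/* Fill the RSS indirection table with the ethtool default
 * round-robin spread across qcnt rx queues.
 */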
9387 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9388 {
9389 int i;
9390
9391 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9392 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9393 }
9394
9395 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9396 {
9397 int i;
9398
9399 if (!tg3_flag(tp, SUPPORT_MSIX))
9400 return;
9401
9402 if (tp->rxq_cnt == 1) {
9403 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9404 return;
9405 }
9406
9407 /* Validate table against current IRQ count */
9408 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9409 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9410 break;
9411 }
9412
9413 if (i != TG3_RSS_INDIR_TBL_SIZE)
9414 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9415 }
9416
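/* Copy the indirection table to the hardware, packing eight 4-bit
 * queue indexes into each 32-bit MAC_RSS_INDIR_TBL register.
 */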
9417 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9418 {
9419 int i = 0;
9420 u32 reg = MAC_RSS_INDIR_TBL_0;
9421
9422 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9423 u32 val = tp->rss_ind_tbl[i];
9424 i++;
9425 for (; i % 8; i++) {
9426 val <<= 4;
9427 val |= tp->rss_ind_tbl[i];
9428 }
9429 tw32(reg, val);
9430 reg += 4;
9431 }
9432 }
9433
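/* Bring the chip from reset to an operational state: apply chip and
 * bus workarounds, reset, then program the buffer manager, rings,
 * DMA engines and MAC.
 */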
9434 /* tp->lock is held. */
9435 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9436 {
9437 u32 val, rdmac_mode;
9438 int i, err, limit;
9439 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9440
9441 tg3_disable_ints(tp);
9442
9443 tg3_stop_fw(tp);
9444
9445 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9446
9447 if (tg3_flag(tp, INIT_COMPLETE))
9448 tg3_abort_hw(tp, 1);
9449
9450 /* Enable MAC control of LPI */
9451 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
9452 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
9453 TG3_CPMU_EEE_LNKIDL_UART_IDL;
9454 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9455 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
9456
9457 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
9458
9459 tw32_f(TG3_CPMU_EEE_CTRL,
9460 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
9461
9462 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9463 TG3_CPMU_EEEMD_LPI_IN_TX |
9464 TG3_CPMU_EEEMD_LPI_IN_RX |
9465 TG3_CPMU_EEEMD_EEE_ENABLE;
9466
9467 if (tg3_asic_rev(tp) != ASIC_REV_5717)
9468 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9469
9470 if (tg3_flag(tp, ENABLE_APE))
9471 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9472
9473 tw32_f(TG3_CPMU_EEE_MODE, val);
9474
9475 tw32_f(TG3_CPMU_EEE_DBTMR1,
9476 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9477 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9478
9479 tw32_f(TG3_CPMU_EEE_DBTMR2,
9480 TG3_CPMU_DBTMR2_APE_TX_2047US |
9481 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9482 }
9483
9484 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9485 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9486 tg3_phy_pull_config(tp);
9487 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9488 }
9489
9490 if (reset_phy)
9491 tg3_phy_reset(tp);
9492
9493 err = tg3_chip_reset(tp);
9494 if (err)
9495 return err;
9496
9497 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9498
9499 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9500 val = tr32(TG3_CPMU_CTRL);
9501 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9502 tw32(TG3_CPMU_CTRL, val);
9503
9504 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9505 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9506 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9507 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9508
9509 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9510 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9511 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9512 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9513
9514 val = tr32(TG3_CPMU_HST_ACC);
9515 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9516 val |= CPMU_HST_ACC_MACCLK_6_25;
9517 tw32(TG3_CPMU_HST_ACC, val);
9518 }
9519
9520 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9521 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9522 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9523 PCIE_PWR_MGMT_L1_THRESH_4MS;
9524 tw32(PCIE_PWR_MGMT_THRESH, val);
9525
9526 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9527 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9528
9529 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9530
9531 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9532 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9533 }
9534
9535 if (tg3_flag(tp, L1PLLPD_EN)) {
9536 u32 grc_mode = tr32(GRC_MODE);
9537
9538 /* Access the lower 1K of PL PCIE block registers. */
9539 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9540 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9541
9542 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9543 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9544 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9545
9546 tw32(GRC_MODE, grc_mode);
9547 }
9548
9549 if (tg3_flag(tp, 57765_CLASS)) {
9550 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9551 u32 grc_mode = tr32(GRC_MODE);
9552
9553 /* Access the lower 1K of PL PCIE block registers. */
9554 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9555 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9556
9557 val = tr32(TG3_PCIE_TLDLPL_PORT +
9558 TG3_PCIE_PL_LO_PHYCTL5);
9559 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9560 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9561
9562 tw32(GRC_MODE, grc_mode);
9563 }
9564
9565 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9566 u32 grc_mode;
9567
9568 /* Fix transmit hangs */
9569 val = tr32(TG3_CPMU_PADRNG_CTL);
9570 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9571 tw32(TG3_CPMU_PADRNG_CTL, val);
9572
9573 grc_mode = tr32(GRC_MODE);
9574
9575 /* Access the lower 1K of DL PCIE block registers. */
9576 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9577 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9578
9579 val = tr32(TG3_PCIE_TLDLPL_PORT +
9580 TG3_PCIE_DL_LO_FTSMAX);
9581 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9582 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9583 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9584
9585 tw32(GRC_MODE, grc_mode);
9586 }
9587
9588 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9589 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9590 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9591 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9592 }
9593
9594 /* This works around an issue with Athlon chipsets on
9595 * B3 tigon3 silicon. This bit has no effect on any
9596 * other revision. But do not set this on PCI Express
9597 * chips and don't even touch the clocks if the CPMU is present.
9598 */
9599 if (!tg3_flag(tp, CPMU_PRESENT)) {
9600 if (!tg3_flag(tp, PCI_EXPRESS))
9601 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9602 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9603 }
9604
9605 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9606 tg3_flag(tp, PCIX_MODE)) {
9607 val = tr32(TG3PCI_PCISTATE);
9608 val |= PCISTATE_RETRY_SAME_DMA;
9609 tw32(TG3PCI_PCISTATE, val);
9610 }
9611
9612 if (tg3_flag(tp, ENABLE_APE)) {
9613 /* Allow reads and writes to the
9614 * APE register and memory space.
9615 */
9616 val = tr32(TG3PCI_PCISTATE);
9617 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9618 PCISTATE_ALLOW_APE_SHMEM_WR |
9619 PCISTATE_ALLOW_APE_PSPACE_WR;
9620 tw32(TG3PCI_PCISTATE, val);
9621 }
9622
9623 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9624 /* Enable some hw fixes. */
9625 val = tr32(TG3PCI_MSI_DATA);
9626 val |= (1 << 26) | (1 << 28) | (1 << 29);
9627 tw32(TG3PCI_MSI_DATA, val);
9628 }
9629
9630 /* Descriptor ring init may make accesses to the
9631 * NIC SRAM area to setup the TX descriptors, so we
9632 * can only do this after the hardware has been
9633 * successfully reset.
9634 */
9635 err = tg3_init_rings(tp);
9636 if (err)
9637 return err;
9638
9639 if (tg3_flag(tp, 57765_PLUS)) {
9640 val = tr32(TG3PCI_DMA_RW_CTRL) &
9641 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9642 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9643 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9644 if (!tg3_flag(tp, 57765_CLASS) &&
9645 tg3_asic_rev(tp) != ASIC_REV_5717 &&
9646 tg3_asic_rev(tp) != ASIC_REV_5762)
9647 val |= DMA_RWCTRL_TAGGED_STAT_WA;
9648 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9649 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9650 tg3_asic_rev(tp) != ASIC_REV_5761) {
9651 * This value is determined during the probe-time DMA
9652 * engine test, tg3_test_dma.
9653 */
9654 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9655 }
9656
9657 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9658 GRC_MODE_4X_NIC_SEND_RINGS |
9659 GRC_MODE_NO_TX_PHDR_CSUM |
9660 GRC_MODE_NO_RX_PHDR_CSUM);
9661 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9662
9663 /* Pseudo-header checksum is done by hardware logic and not
9664 * the offload processors, so make the chip do the pseudo-
9665 * header checksums on receive. For transmit it is more
9666 * convenient to do the pseudo-header checksum in software
9667 * as Linux does that on transmit for us in all cases.
9668 */
9669 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9670
9671 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9672 if (tp->rxptpctl)
9673 tw32(TG3_RX_PTP_CTL,
9674 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9675
9676 if (tg3_flag(tp, PTP_CAPABLE))
9677 val |= GRC_MODE_TIME_SYNC_ENABLE;
9678
9679 tw32(GRC_MODE, tp->grc_mode | val);
9680
9681 /* Set up the timer prescaler register. The clock is always 66 MHz. */
9682 val = tr32(GRC_MISC_CFG);
9683 val &= ~0xff;
9684 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9685 tw32(GRC_MISC_CFG, val);
9686
9687 /* Initialize MBUF/DESC pool. */
9688 if (tg3_flag(tp, 5750_PLUS)) {
9689 /* Do nothing. */
9690 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9691 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9692 if (tg3_asic_rev(tp) == ASIC_REV_5704)
9693 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9694 else
9695 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9696 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9697 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9698 } else if (tg3_flag(tp, TSO_CAPABLE)) {
9699 int fw_len;
9700
9701 fw_len = tp->fw_len;
9702 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9703 tw32(BUFMGR_MB_POOL_ADDR,
9704 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9705 tw32(BUFMGR_MB_POOL_SIZE,
9706 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9707 }
9708
9709 if (tp->dev->mtu <= ETH_DATA_LEN) {
9710 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9711 tp->bufmgr_config.mbuf_read_dma_low_water);
9712 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9713 tp->bufmgr_config.mbuf_mac_rx_low_water);
9714 tw32(BUFMGR_MB_HIGH_WATER,
9715 tp->bufmgr_config.mbuf_high_water);
9716 } else {
9717 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9718 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9719 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9720 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9721 tw32(BUFMGR_MB_HIGH_WATER,
9722 tp->bufmgr_config.mbuf_high_water_jumbo);
9723 }
9724 tw32(BUFMGR_DMA_LOW_WATER,
9725 tp->bufmgr_config.dma_low_water);
9726 tw32(BUFMGR_DMA_HIGH_WATER,
9727 tp->bufmgr_config.dma_high_water);
9728
9729 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9730 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9731 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9732 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9733 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9734 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9735 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9736 tw32(BUFMGR_MODE, val);
9737 for (i = 0; i < 2000; i++) {
9738 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9739 break;
9740 udelay(10);
9741 }
9742 if (i >= 2000) {
9743 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9744 return -ENODEV;
9745 }
9746
9747 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9748 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9749
9750 tg3_setup_rxbd_thresholds(tp);
9751
9752 /* Initialize TG3_BDINFO's at:
9753 * RCVDBDI_STD_BD: standard eth size rx ring
9754 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
9755 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
9756 *
9757 * like so:
9758 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
9759 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
9760 * ring attribute flags
9761 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
9762 *
9763 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9764 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9765 *
9766 * The size of each ring is fixed in the firmware, but the location is
9767 * configurable.
9768 */
9769 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9770 ((u64) tpr->rx_std_mapping >> 32));
9771 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9772 ((u64) tpr->rx_std_mapping & 0xffffffff));
9773 if (!tg3_flag(tp, 5717_PLUS))
9774 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9775 NIC_SRAM_RX_BUFFER_DESC);
9776
9777 /* Disable the mini ring */
9778 if (!tg3_flag(tp, 5705_PLUS))
9779 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9780 BDINFO_FLAGS_DISABLED);
9781
9782 /* Program the jumbo buffer descriptor ring control
9783 * blocks on those devices that have them.
9784 */
9785 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9786 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9787
9788 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9789 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9790 ((u64) tpr->rx_jmb_mapping >> 32));
9791 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9792 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9793 val = TG3_RX_JMB_RING_SIZE(tp) <<
9794 BDINFO_FLAGS_MAXLEN_SHIFT;
9795 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9796 val | BDINFO_FLAGS_USE_EXT_RECV);
9797 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9798 tg3_flag(tp, 57765_CLASS) ||
9799 tg3_asic_rev(tp) == ASIC_REV_5762)
9800 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9801 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9802 } else {
9803 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9804 BDINFO_FLAGS_DISABLED);
9805 }
9806
9807 if (tg3_flag(tp, 57765_PLUS)) {
9808 val = TG3_RX_STD_RING_SIZE(tp);
9809 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9810 val |= (TG3_RX_STD_DMA_SZ << 2);
9811 } else
9812 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9813 } else
9814 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9815
9816 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9817
9818 tpr->rx_std_prod_idx = tp->rx_pending;
9819 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9820
9821 tpr->rx_jmb_prod_idx =
9822 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9823 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9824
9825 tg3_rings_reset(tp);
9826
9827 /* Initialize MAC address and backoff seed. */
9828 __tg3_set_mac_addr(tp, false);
9829
9830 /* MTU + ethernet header + FCS + optional VLAN tag */
9831 tw32(MAC_RX_MTU_SIZE,
9832 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9833
9834 /* The slot time is changed by tg3_setup_phy if we
9835 * run at gigabit with half duplex.
9836 */
9837 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9838 (6 << TX_LENGTHS_IPG_SHIFT) |
9839 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9840
9841 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9842 tg3_asic_rev(tp) == ASIC_REV_5762)
9843 val |= tr32(MAC_TX_LENGTHS) &
9844 (TX_LENGTHS_JMB_FRM_LEN_MSK |
9845 TX_LENGTHS_CNT_DWN_VAL_MSK);
9846
9847 tw32(MAC_TX_LENGTHS, val);
9848
9849 /* Receive rules. */
9850 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9851 tw32(RCVLPC_CONFIG, 0x0181);
9852
9853 /* Calculate RDMAC_MODE setting early; we need it to determine
9854 * the RCVLPC_STATE_ENABLE mask.
9855 */
9856 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9857 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9858 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9859 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9860 RDMAC_MODE_LNGREAD_ENAB);
9861
9862 if (tg3_asic_rev(tp) == ASIC_REV_5717)
9863 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9864
9865 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
9866 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9867 tg3_asic_rev(tp) == ASIC_REV_57780)
9868 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9869 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9870 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9871
9872 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9873 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9874 if (tg3_flag(tp, TSO_CAPABLE) &&
9875 tg3_asic_rev(tp) == ASIC_REV_5705) {
9876 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9877 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9878 !tg3_flag(tp, IS_5788)) {
9879 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9880 }
9881 }
9882
9883 if (tg3_flag(tp, PCI_EXPRESS))
9884 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9885
9886 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
9887 tp->dma_limit = 0;
9888 if (tp->dev->mtu <= ETH_DATA_LEN) {
9889 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
9890 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
9891 }
9892 }
9893
9894 if (tg3_flag(tp, HW_TSO_1) ||
9895 tg3_flag(tp, HW_TSO_2) ||
9896 tg3_flag(tp, HW_TSO_3))
9897 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9898
9899 if (tg3_flag(tp, 57765_PLUS) ||
9900 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9901 tg3_asic_rev(tp) == ASIC_REV_57780)
9902 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9903
9904 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9905 tg3_asic_rev(tp) == ASIC_REV_5762)
9906 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9907
9908 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
9909 tg3_asic_rev(tp) == ASIC_REV_5784 ||
9910 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9911 tg3_asic_rev(tp) == ASIC_REV_57780 ||
9912 tg3_flag(tp, 57765_PLUS)) {
9913 u32 tgtreg;
9914
9915 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9916 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
9917 else
9918 tgtreg = TG3_RDMA_RSRVCTRL_REG;
9919
9920 val = tr32(tgtreg);
9921 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9922 tg3_asic_rev(tp) == ASIC_REV_5762) {
9923 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9924 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9925 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9926 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9927 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9928 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9929 }
9930 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9931 }
9932
9933 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
9934 tg3_asic_rev(tp) == ASIC_REV_5720 ||
9935 tg3_asic_rev(tp) == ASIC_REV_5762) {
9936 u32 tgtreg;
9937
9938 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9939 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
9940 else
9941 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
9942
9943 val = tr32(tgtreg);
9944 tw32(tgtreg, val |
9945 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9946 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9947 }
9948
9949 /* Receive/send statistics. */
9950 if (tg3_flag(tp, 5750_PLUS)) {
9951 val = tr32(RCVLPC_STATS_ENABLE);
9952 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9953 tw32(RCVLPC_STATS_ENABLE, val);
9954 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9955 tg3_flag(tp, TSO_CAPABLE)) {
9956 val = tr32(RCVLPC_STATS_ENABLE);
9957 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9958 tw32(RCVLPC_STATS_ENABLE, val);
9959 } else {
9960 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9961 }
9962 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9963 tw32(SNDDATAI_STATSENAB, 0xffffff);
9964 tw32(SNDDATAI_STATSCTRL,
9965 (SNDDATAI_SCTRL_ENABLE |
9966 SNDDATAI_SCTRL_FASTUPD));
9967
9968 /* Setup host coalescing engine. */
9969 tw32(HOSTCC_MODE, 0);
9970 for (i = 0; i < 2000; i++) {
9971 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9972 break;
9973 udelay(10);
9974 }
9975
9976 __tg3_set_coalesce(tp, &tp->coal);
9977
9978 if (!tg3_flag(tp, 5705_PLUS)) {
9979 /* Status/statistics block address. See tg3_timer,
9980 * the tg3_periodic_fetch_stats call there, and
9981 * tg3_get_stats to see how this works for 5705/5750 chips.
9982 */
9983 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9984 ((u64) tp->stats_mapping >> 32));
9985 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9986 ((u64) tp->stats_mapping & 0xffffffff));
9987 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9988
9989 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9990
9991 /* Clear statistics and status block memory areas */
9992 for (i = NIC_SRAM_STATS_BLK;
9993 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9994 i += sizeof(u32)) {
9995 tg3_write_mem(tp, i, 0);
9996 udelay(40);
9997 }
9998 }
9999
10000 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10001
10002 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10003 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10004 if (!tg3_flag(tp, 5705_PLUS))
10005 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10006
10007 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10008 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
/* Reset to prevent intermittently losing the first rx packet. */
10010 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10011 udelay(10);
10012 }
10013
10014 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10015 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10016 MAC_MODE_FHDE_ENABLE;
10017 if (tg3_flag(tp, ENABLE_APE))
10018 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10019 if (!tg3_flag(tp, 5705_PLUS) &&
10020 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10021 tg3_asic_rev(tp) != ASIC_REV_5700)
10022 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10023 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10024 udelay(40);
10025
10026 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10027 * If TG3_FLAG_IS_NIC is zero, we should read the
10028 * register to preserve the GPIO settings for LOMs. The GPIOs,
10029 * whether used as inputs or outputs, are set by boot code after
10030 * reset.
10031 */
10032 if (!tg3_flag(tp, IS_NIC)) {
10033 u32 gpio_mask;
10034
10035 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10036 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10037 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10038
10039 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10040 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10041 GRC_LCLCTRL_GPIO_OUTPUT3;
10042
10043 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10044 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10045
10046 tp->grc_local_ctrl &= ~gpio_mask;
10047 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10048
/* GPIO1 must be driven high for EEPROM write protect. */
10050 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10051 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10052 GRC_LCLCTRL_GPIO_OUTPUT1);
10053 }
10054 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10055 udelay(100);
10056
10057 if (tg3_flag(tp, USING_MSIX)) {
10058 val = tr32(MSGINT_MODE);
10059 val |= MSGINT_MODE_ENABLE;
10060 if (tp->irq_cnt > 1)
10061 val |= MSGINT_MODE_MULTIVEC_EN;
10062 if (!tg3_flag(tp, 1SHOT_MSI))
10063 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10064 tw32(MSGINT_MODE, val);
10065 }
10066
10067 if (!tg3_flag(tp, 5705_PLUS)) {
10068 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10069 udelay(40);
10070 }
10071
10072 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10073 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10074 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10075 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10076 WDMAC_MODE_LNGREAD_ENAB);
10077
10078 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10079 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10080 if (tg3_flag(tp, TSO_CAPABLE) &&
10081 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10082 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10083 /* nothing */
10084 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10085 !tg3_flag(tp, IS_5788)) {
10086 val |= WDMAC_MODE_RX_ACCEL;
10087 }
10088 }
10089
10090 /* Enable host coalescing bug fix */
10091 if (tg3_flag(tp, 5755_PLUS))
10092 val |= WDMAC_MODE_STATUS_TAG_FIX;
10093
10094 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10095 val |= WDMAC_MODE_BURST_ALL_DATA;
10096
10097 tw32_f(WDMAC_MODE, val);
10098 udelay(40);
10099
10100 if (tg3_flag(tp, PCIX_MODE)) {
10101 u16 pcix_cmd;
10102
10103 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10104 &pcix_cmd);
10105 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10106 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10107 pcix_cmd |= PCI_X_CMD_READ_2K;
10108 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10109 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10110 pcix_cmd |= PCI_X_CMD_READ_2K;
10111 }
10112 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10113 pcix_cmd);
10114 }
10115
10116 tw32_f(RDMAC_MODE, rdmac_mode);
10117 udelay(40);
10118
10119 if (tg3_asic_rev(tp) == ASIC_REV_5719) {
10120 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10121 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10122 break;
10123 }
10124 if (i < TG3_NUM_RDMA_CHANNELS) {
10125 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10126 val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
10127 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10128 tg3_flag_set(tp, 5719_RDMA_BUG);
10129 }
10130 }
10131
10132 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10133 if (!tg3_flag(tp, 5705_PLUS))
10134 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10135
10136 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10137 tw32(SNDDATAC_MODE,
10138 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10139 else
10140 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10141
10142 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10143 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10144 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10145 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10146 val |= RCVDBDI_MODE_LRG_RING_SZ;
10147 tw32(RCVDBDI_MODE, val);
10148 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10149 if (tg3_flag(tp, HW_TSO_1) ||
10150 tg3_flag(tp, HW_TSO_2) ||
10151 tg3_flag(tp, HW_TSO_3))
10152 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10153 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10154 if (tg3_flag(tp, ENABLE_TSS))
10155 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10156 tw32(SNDBDI_MODE, val);
10157 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10158
10159 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10160 err = tg3_load_5701_a0_firmware_fix(tp);
10161 if (err)
10162 return err;
10163 }
10164
10165 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
/* Ignore any errors from the firmware download. If the
* download fails, the device will operate with EEE disabled.
*/
10169 tg3_load_57766_firmware(tp);
10170 }
10171
10172 if (tg3_flag(tp, TSO_CAPABLE)) {
10173 err = tg3_load_tso_firmware(tp);
10174 if (err)
10175 return err;
10176 }
10177
10178 tp->tx_mode = TX_MODE_ENABLE;
10179
10180 if (tg3_flag(tp, 5755_PLUS) ||
10181 tg3_asic_rev(tp) == ASIC_REV_5906)
10182 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10183
10184 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10185 tg3_asic_rev(tp) == ASIC_REV_5762) {
10186 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10187 tp->tx_mode &= ~val;
10188 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10189 }
10190
10191 tw32_f(MAC_TX_MODE, tp->tx_mode);
10192 udelay(100);
10193
10194 if (tg3_flag(tp, ENABLE_RSS)) {
10195 tg3_rss_write_indir_tbl(tp);
10196
10197 /* Setup the "secret" hash key. */
10198 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
10199 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
10200 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
10201 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
10202 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
10203 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
10204 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
10205 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
10206 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
10207 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
10208 }
10209
10210 tp->rx_mode = RX_MODE_ENABLE;
10211 if (tg3_flag(tp, 5755_PLUS))
10212 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10213
10214 if (tg3_flag(tp, ENABLE_RSS))
10215 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10216 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10217 RX_MODE_RSS_IPV6_HASH_EN |
10218 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10219 RX_MODE_RSS_IPV4_HASH_EN |
10220 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10221
10222 tw32_f(MAC_RX_MODE, tp->rx_mode);
10223 udelay(10);
10224
10225 tw32(MAC_LED_CTRL, tp->led_ctrl);
10226
10227 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10228 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10229 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10230 udelay(10);
10231 }
10232 tw32_f(MAC_RX_MODE, tp->rx_mode);
10233 udelay(10);
10234
10235 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10236 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10237 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
/* Set drive transmission level to 1.2V, but only if the
* signal pre-emphasis bit is not set.
*/
10240 val = tr32(MAC_SERDES_CFG);
10241 val &= 0xfffff000;
10242 val |= 0x880;
10243 tw32(MAC_SERDES_CFG, val);
10244 }
10245 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10246 tw32(MAC_SERDES_CFG, 0x616000);
10247 }
10248
10249 /* Prevent chip from dropping frames when flow control
10250 * is enabled.
10251 */
10252 if (tg3_flag(tp, 57765_CLASS))
10253 val = 1;
10254 else
10255 val = 2;
10256 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10257
10258 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10259 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10260 /* Use hardware link auto-negotiation */
10261 tg3_flag_set(tp, HW_AUTONEG);
10262 }
10263
10264 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10265 tg3_asic_rev(tp) == ASIC_REV_5714) {
10266 u32 tmp;
10267
10268 tmp = tr32(SERDES_RX_CTRL);
10269 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10270 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10271 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10272 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10273 }
10274
10275 if (!tg3_flag(tp, USE_PHYLIB)) {
10276 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10277 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10278
10279 err = tg3_setup_phy(tp, false);
10280 if (err)
10281 return err;
10282
10283 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10284 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10285 u32 tmp;
10286
10287 /* Clear CRC stats. */
10288 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10289 tg3_writephy(tp, MII_TG3_TEST1,
10290 tmp | MII_TG3_TEST1_CRC_EN);
10291 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10292 }
10293 }
10294 }
10295
10296 __tg3_set_rx_mode(tp->dev);
10297
10298 /* Initialize receive rules. */
10299 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10300 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10301 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10302 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10303
10304 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10305 limit = 8;
10306 else
10307 limit = 16;
10308 if (tg3_flag(tp, ENABLE_ASF))
10309 limit -= 4;
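/* Each case below intentionally falls through to the next, so
* entering the switch at the computed limit zeroes every
* rule/value pair above the ones left in use (rules 3 and 2
* are deliberately commented out).
*/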
10310 switch (limit) {
10311 case 16:
10312 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10313 case 15:
10314 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10315 case 14:
10316 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10317 case 13:
10318 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10319 case 12:
10320 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10321 case 11:
10322 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10323 case 10:
10324 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10325 case 9:
10326 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10327 case 8:
10328 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10329 case 7:
10330 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10331 case 6:
10332 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10333 case 5:
10334 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10335 case 4:
10336 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10337 case 3:
10338 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10339 case 2:
10340 case 1:
10341
10342 default:
10343 break;
10344 }
10345
10346 if (tg3_flag(tp, ENABLE_APE))
10347 /* Write our heartbeat update interval to APE. */
10348 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10349 APE_HOST_HEARTBEAT_INT_DISABLE);
10350
10351 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10352
10353 return 0;
10354 }
10355
10356 /* Called at device open time to get the chip ready for
10357 * packet processing. Invoked with tp->lock held.
10358 */
10359 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10360 {
10361 tg3_switch_clocks(tp);
10362
10363 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10364
10365 return tg3_reset_hw(tp, reset_phy);
10366 }
10367
10368 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10369 {
10370 int i;
10371
10372 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10373 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10374
10375 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10376 off += len;
10377
10378 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10379 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10380 memset(ocir, 0, TG3_OCIR_LEN);
10381 }
10382 }
10383
10384 /* sysfs attributes for hwmon */
10385 static ssize_t tg3_show_temp(struct device *dev,
10386 struct device_attribute *devattr, char *buf)
10387 {
10388 struct pci_dev *pdev = to_pci_dev(dev);
10389 struct net_device *netdev = pci_get_drvdata(pdev);
10390 struct tg3 *tp = netdev_priv(netdev);
10391 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10392 u32 temperature;
10393
10394 spin_lock_bh(&tp->lock);
10395 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10396 sizeof(temperature));
10397 spin_unlock_bh(&tp->lock);
10398 return sprintf(buf, "%u\n", temperature);
10399 }
10400
10402 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10403 TG3_TEMP_SENSOR_OFFSET);
10404 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10405 TG3_TEMP_CAUTION_OFFSET);
10406 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10407 TG3_TEMP_MAX_OFFSET);
10408
10409 static struct attribute *tg3_attributes[] = {
10410 &sensor_dev_attr_temp1_input.dev_attr.attr,
10411 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10412 &sensor_dev_attr_temp1_max.dev_attr.attr,
10413 NULL
10414 };
10415
10416 static const struct attribute_group tg3_group = {
10417 .attrs = tg3_attributes,
10418 };
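/* Once this group is registered (see tg3_hwmon_open() below), the
* three sensor attributes appear as ordinary sysfs files under the
* PCI device directory, reachable through the hwmon class device.
* A minimal usage sketch from userspace; the hwmon index and the
* value shown are illustrative assumptions only:
*
*   $ cat /sys/class/hwmon/hwmon0/device/temp1_input
*   45
*/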
10419
10420 static void tg3_hwmon_close(struct tg3 *tp)
10421 {
10422 if (tp->hwmon_dev) {
10423 hwmon_device_unregister(tp->hwmon_dev);
10424 tp->hwmon_dev = NULL;
10425 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10426 }
10427 }
10428
10429 static void tg3_hwmon_open(struct tg3 *tp)
10430 {
10431 int i, err;
10432 u32 size = 0;
10433 struct pci_dev *pdev = tp->pdev;
10434 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10435
10436 tg3_sd_scan_scratchpad(tp, ocirs);
10437
10438 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10439 if (!ocirs[i].src_data_length)
10440 continue;
10441
10442 size += ocirs[i].src_hdr_length;
10443 size += ocirs[i].src_data_length;
10444 }
10445
10446 if (!size)
10447 return;
10448
10449 /* Register hwmon sysfs hooks */
10450 err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10451 if (err) {
10452 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10453 return;
10454 }
10455
10456 tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10457 if (IS_ERR(tp->hwmon_dev)) {
10458 tp->hwmon_dev = NULL;
10459 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10460 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10461 }
10462 }
10463
10465 #define TG3_STAT_ADD32(PSTAT, REG) \
10466 do { u32 __val = tr32(REG); \
10467 (PSTAT)->low += __val; \
10468 if ((PSTAT)->low < __val) \
10469 (PSTAT)->high += 1; \
10470 } while (0)
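/* TG3_STAT_ADD32() folds a 32-bit hardware counter into a 64-bit
* {high, low} software counter, detecting wrap-around of the low
* word via unsigned overflow. For reference, the first use below,
* TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS), expands to:
*
*   u32 __val = tr32(MAC_TX_STATS_OCTETS);
*   sp->tx_octets.low += __val;        // may wrap modulo 2^32
*   if (sp->tx_octets.low < __val)     // wrapped, so carry ...
*           sp->tx_octets.high += 1;   // ... into the high word
*/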
10471
10472 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10473 {
10474 struct tg3_hw_stats *sp = tp->hw_stats;
10475
10476 if (!tp->link_up)
10477 return;
10478
10479 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10480 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10481 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10482 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10483 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10484 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10485 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10486 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10487 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10488 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10489 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10490 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10491 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10492 if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
10493 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10494 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10495 u32 val;
10496
10497 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10498 val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
10499 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10500 tg3_flag_clear(tp, 5719_RDMA_BUG);
10501 }
10502
10503 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10504 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10505 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10506 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10507 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10508 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10509 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10510 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10511 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10512 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10513 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10514 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10515 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10516 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10517
10518 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10519 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10520 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10521 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10522 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10523 } else {
10524 u32 val = tr32(HOSTCC_FLOW_ATTN);
10525 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10526 if (val) {
10527 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10528 sp->rx_discards.low += val;
10529 if (sp->rx_discards.low < val)
10530 sp->rx_discards.high += 1;
10531 }
10532 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10533 }
10534 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10535 }
10536
10537 static void tg3_chk_missed_msi(struct tg3 *tp)
10538 {
10539 u32 i;
10540
10541 for (i = 0; i < tp->irq_cnt; i++) {
10542 struct tg3_napi *tnapi = &tp->napi[i];
10543
10544 if (tg3_has_work(tnapi)) {
10545 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10546 tnapi->last_tx_cons == tnapi->tx_cons) {
10547 if (tnapi->chk_msi_cnt < 1) {
10548 tnapi->chk_msi_cnt++;
10549 return;
10550 }
10551 tg3_msi(0, tnapi);
10552 }
10553 }
10554 tnapi->chk_msi_cnt = 0;
10555 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10556 tnapi->last_tx_cons = tnapi->tx_cons;
10557 }
10558 }
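/* The check above is a heuristic for lost MSIs: if a NAPI context
* has work pending but neither its rx nor tx consumer index has
* moved for two consecutive timer ticks, the interrupt is assumed
* lost and tg3_msi() is invoked by hand to restart NAPI polling.
*/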
10559
10560 static void tg3_timer(unsigned long __opaque)
10561 {
10562 struct tg3 *tp = (struct tg3 *) __opaque;
10563
10564 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10565 goto restart_timer;
10566
10567 spin_lock(&tp->lock);
10568
10569 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10570 tg3_flag(tp, 57765_CLASS))
10571 tg3_chk_missed_msi(tp);
10572
10573 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10574 /* BCM4785: Flush posted writes from GbE to host memory. */
10575 tr32(HOSTCC_MODE);
10576 }
10577
10578 if (!tg3_flag(tp, TAGGED_STATUS)) {
/* All of this garbage is because, when using non-tagged
* IRQ status, the mailbox/status_block protocol the chip
* uses with the CPU is race prone.
10582 */
10583 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10584 tw32(GRC_LOCAL_CTRL,
10585 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10586 } else {
10587 tw32(HOSTCC_MODE, tp->coalesce_mode |
10588 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10589 }
10590
10591 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10592 spin_unlock(&tp->lock);
10593 tg3_reset_task_schedule(tp);
10594 goto restart_timer;
10595 }
10596 }
10597
10598 /* This part only runs once per second. */
10599 if (!--tp->timer_counter) {
10600 if (tg3_flag(tp, 5705_PLUS))
10601 tg3_periodic_fetch_stats(tp);
10602
10603 if (tp->setlpicnt && !--tp->setlpicnt)
10604 tg3_phy_eee_enable(tp);
10605
10606 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10607 u32 mac_stat;
10608 int phy_event;
10609
10610 mac_stat = tr32(MAC_STATUS);
10611
10612 phy_event = 0;
10613 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10614 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10615 phy_event = 1;
10616 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10617 phy_event = 1;
10618
10619 if (phy_event)
10620 tg3_setup_phy(tp, false);
10621 } else if (tg3_flag(tp, POLL_SERDES)) {
10622 u32 mac_stat = tr32(MAC_STATUS);
10623 int need_setup = 0;
10624
10625 if (tp->link_up &&
10626 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10627 need_setup = 1;
10628 }
10629 if (!tp->link_up &&
10630 (mac_stat & (MAC_STATUS_PCS_SYNCED |
10631 MAC_STATUS_SIGNAL_DET))) {
10632 need_setup = 1;
10633 }
10634 if (need_setup) {
10635 if (!tp->serdes_counter) {
10636 tw32_f(MAC_MODE,
10637 (tp->mac_mode &
10638 ~MAC_MODE_PORT_MODE_MASK));
10639 udelay(40);
10640 tw32_f(MAC_MODE, tp->mac_mode);
10641 udelay(40);
10642 }
10643 tg3_setup_phy(tp, false);
10644 }
10645 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10646 tg3_flag(tp, 5780_CLASS)) {
10647 tg3_serdes_parallel_detect(tp);
10648 }
10649
10650 tp->timer_counter = tp->timer_multiplier;
10651 }
10652
/* The heartbeat is only sent once every 2 seconds.
10654 *
10655 * The heartbeat is to tell the ASF firmware that the host
10656 * driver is still alive. In the event that the OS crashes,
10657 * ASF needs to reset the hardware to free up the FIFO space
10658 * that may be filled with rx packets destined for the host.
10659 * If the FIFO is full, ASF will no longer function properly.
10660 *
* Unintended resets have been reported on real-time kernels
* where the timer doesn't run on time. Netpoll will also have
* the same problem.
10664 *
10665 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10666 * to check the ring condition when the heartbeat is expiring
10667 * before doing the reset. This will prevent most unintended
10668 * resets.
10669 */
10670 if (!--tp->asf_counter) {
10671 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10672 tg3_wait_for_event_ack(tp);
10673
10674 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10675 FWCMD_NICDRV_ALIVE3);
10676 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10677 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10678 TG3_FW_UPDATE_TIMEOUT_SEC);
10679
10680 tg3_generate_fw_event(tp);
10681 }
10682 tp->asf_counter = tp->asf_multiplier;
10683 }
10684
10685 spin_unlock(&tp->lock);
10686
10687 restart_timer:
10688 tp->timer.expires = jiffies + tp->timer_offset;
10689 add_timer(&tp->timer);
10690 }
10691
10692 static void tg3_timer_init(struct tg3 *tp)
10693 {
10694 if (tg3_flag(tp, TAGGED_STATUS) &&
10695 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10696 !tg3_flag(tp, 57765_CLASS))
10697 tp->timer_offset = HZ;
10698 else
10699 tp->timer_offset = HZ / 10;
10700
10701 BUG_ON(tp->timer_offset > HZ);
10702
10703 tp->timer_multiplier = (HZ / tp->timer_offset);
10704 tp->asf_multiplier = (HZ / tp->timer_offset) *
10705 TG3_FW_UPDATE_FREQ_SEC;
10706
10707 init_timer(&tp->timer);
10708 tp->timer.data = (unsigned long) tp;
10709 tp->timer.function = tg3_timer;
10710 }
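/* A worked example of the cadence set up above, assuming HZ = 1000
* and a chip that needs the fast (non-tagged) path:
*
*   timer_offset     = HZ / 10           = 100 jiffies (10 ticks/sec)
*   timer_multiplier = HZ / timer_offset = 10
*   asf_multiplier   = 10 * TG3_FW_UPDATE_FREQ_SEC
*
* so the once-per-second block in tg3_timer() runs every tenth tick,
* and the ASF heartbeat fires every TG3_FW_UPDATE_FREQ_SEC seconds.
*/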
10711
10712 static void tg3_timer_start(struct tg3 *tp)
10713 {
10714 tp->asf_counter = tp->asf_multiplier;
10715 tp->timer_counter = tp->timer_multiplier;
10716
10717 tp->timer.expires = jiffies + tp->timer_offset;
10718 add_timer(&tp->timer);
10719 }
10720
10721 static void tg3_timer_stop(struct tg3 *tp)
10722 {
10723 del_timer_sync(&tp->timer);
10724 }
10725
10726 /* Restart hardware after configuration changes, self-test, etc.
10727 * Invoked with tp->lock held.
10728 */
10729 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
10730 __releases(tp->lock)
10731 __acquires(tp->lock)
10732 {
10733 int err;
10734
10735 err = tg3_init_hw(tp, reset_phy);
10736 if (err) {
10737 netdev_err(tp->dev,
10738 "Failed to re-initialize device, aborting\n");
10739 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10740 tg3_full_unlock(tp);
10741 tg3_timer_stop(tp);
10742 tp->irq_sync = 0;
10743 tg3_napi_enable(tp);
10744 dev_close(tp->dev);
10745 tg3_full_lock(tp, 0);
10746 }
10747 return err;
10748 }
10749
10750 static void tg3_reset_task(struct work_struct *work)
10751 {
10752 struct tg3 *tp = container_of(work, struct tg3, reset_task);
10753 int err;
10754
10755 tg3_full_lock(tp, 0);
10756
10757 if (!netif_running(tp->dev)) {
10758 tg3_flag_clear(tp, RESET_TASK_PENDING);
10759 tg3_full_unlock(tp);
10760 return;
10761 }
10762
10763 tg3_full_unlock(tp);
10764
10765 tg3_phy_stop(tp);
10766
10767 tg3_netif_stop(tp);
10768
10769 tg3_full_lock(tp, 1);
10770
10771 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10772 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10773 tp->write32_rx_mbox = tg3_write_flush_reg32;
10774 tg3_flag_set(tp, MBOX_WRITE_REORDER);
10775 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10776 }
10777
10778 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10779 err = tg3_init_hw(tp, true);
10780 if (err)
10781 goto out;
10782
10783 tg3_netif_start(tp);
10784
10785 out:
10786 tg3_full_unlock(tp);
10787
10788 if (!err)
10789 tg3_phy_start(tp);
10790
10791 tg3_flag_clear(tp, RESET_TASK_PENDING);
10792 }
10793
10794 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10795 {
10796 irq_handler_t fn;
10797 unsigned long flags;
10798 char *name;
10799 struct tg3_napi *tnapi = &tp->napi[irq_num];
10800
10801 if (tp->irq_cnt == 1)
10802 name = tp->dev->name;
10803 else {
10804 name = &tnapi->irq_lbl[0];
10805 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10806 name[IFNAMSIZ-1] = 0;
10807 }
10808
10809 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10810 fn = tg3_msi;
10811 if (tg3_flag(tp, 1SHOT_MSI))
10812 fn = tg3_msi_1shot;
10813 flags = 0;
10814 } else {
10815 fn = tg3_interrupt;
10816 if (tg3_flag(tp, TAGGED_STATUS))
10817 fn = tg3_interrupt_tagged;
10818 flags = IRQF_SHARED;
10819 }
10820
10821 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10822 }
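/* The resulting request_irq() names are "ethX" with a single vector
* and "ethX-0", "ethX-1", ... with several. An illustrative
* /proc/interrupts layout in multiqueue MSI-X mode, assuming the
* interface is named eth0:
*
*   eth0-0   <- vector 0: link and error events
*   eth0-1   <- vector 1: first rx/tx ring pair
*/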
10823
10824 static int tg3_test_interrupt(struct tg3 *tp)
10825 {
10826 struct tg3_napi *tnapi = &tp->napi[0];
10827 struct net_device *dev = tp->dev;
10828 int err, i, intr_ok = 0;
10829 u32 val;
10830
10831 if (!netif_running(dev))
10832 return -ENODEV;
10833
10834 tg3_disable_ints(tp);
10835
10836 free_irq(tnapi->irq_vec, tnapi);
10837
10838 /*
* Turn off MSI one-shot mode. Otherwise this test has no
* way to observe whether the interrupt was delivered.
10841 */
10842 if (tg3_flag(tp, 57765_PLUS)) {
10843 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10844 tw32(MSGINT_MODE, val);
10845 }
10846
10847 err = request_irq(tnapi->irq_vec, tg3_test_isr,
10848 IRQF_SHARED, dev->name, tnapi);
10849 if (err)
10850 return err;
10851
10852 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10853 tg3_enable_ints(tp);
10854
10855 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10856 tnapi->coal_now);
10857
10858 for (i = 0; i < 5; i++) {
10859 u32 int_mbox, misc_host_ctrl;
10860
10861 int_mbox = tr32_mailbox(tnapi->int_mbox);
10862 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10863
10864 if ((int_mbox != 0) ||
10865 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10866 intr_ok = 1;
10867 break;
10868 }
10869
10870 if (tg3_flag(tp, 57765_PLUS) &&
10871 tnapi->hw_status->status_tag != tnapi->last_tag)
10872 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10873
10874 msleep(10);
10875 }
10876
10877 tg3_disable_ints(tp);
10878
10879 free_irq(tnapi->irq_vec, tnapi);
10880
10881 err = tg3_request_irq(tp, 0);
10882
10883 if (err)
10884 return err;
10885
10886 if (intr_ok) {
10887 /* Reenable MSI one shot mode. */
10888 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10889 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10890 tw32(MSGINT_MODE, val);
10891 }
10892 return 0;
10893 }
10894
10895 return -EIO;
10896 }
10897
/* Returns 0 if the MSI test succeeds, or if the MSI test fails
* but INTx mode is successfully restored.
10900 */
10901 static int tg3_test_msi(struct tg3 *tp)
10902 {
10903 int err;
10904 u16 pci_cmd;
10905
10906 if (!tg3_flag(tp, USING_MSI))
10907 return 0;
10908
10909 /* Turn off SERR reporting in case MSI terminates with Master
10910 * Abort.
10911 */
10912 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10913 pci_write_config_word(tp->pdev, PCI_COMMAND,
10914 pci_cmd & ~PCI_COMMAND_SERR);
10915
10916 err = tg3_test_interrupt(tp);
10917
10918 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10919
10920 if (!err)
10921 return 0;
10922
10923 /* other failures */
10924 if (err != -EIO)
10925 return err;
10926
10927 /* MSI test failed, go back to INTx mode */
10928 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10929 "to INTx mode. Please report this failure to the PCI "
10930 "maintainer and include system chipset information\n");
10931
10932 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10933
10934 pci_disable_msi(tp->pdev);
10935
10936 tg3_flag_clear(tp, USING_MSI);
10937 tp->napi[0].irq_vec = tp->pdev->irq;
10938
10939 err = tg3_request_irq(tp, 0);
10940 if (err)
10941 return err;
10942
10943 /* Need to reset the chip because the MSI cycle may have terminated
10944 * with Master Abort.
10945 */
10946 tg3_full_lock(tp, 1);
10947
10948 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10949 err = tg3_init_hw(tp, true);
10950
10951 tg3_full_unlock(tp);
10952
10953 if (err)
10954 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10955
10956 return err;
10957 }
10958
10959 static int tg3_request_firmware(struct tg3 *tp)
10960 {
10961 const struct tg3_firmware_hdr *fw_hdr;
10962
10963 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10964 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10965 tp->fw_needed);
10966 return -ENOENT;
10967 }
10968
10969 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
10970
/* The firmware blob starts with version numbers, followed by
* the start address and the _full_ length including BSS sections
* (which must be longer than the actual data, of course).
10974 */
10975
10976 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
10977 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
10978 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10979 tp->fw_len, tp->fw_needed);
10980 release_firmware(tp->fw);
10981 tp->fw = NULL;
10982 return -EINVAL;
10983 }
10984
10985 /* We no longer need firmware; we have it. */
10986 tp->fw_needed = NULL;
10987 return 0;
10988 }
10989
10990 static u32 tg3_irq_count(struct tg3 *tp)
10991 {
10992 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
10993
10994 if (irq_cnt > 1) {
10995 /* We want as many rx rings enabled as there are cpus.
10996 * In multiqueue MSI-X mode, the first MSI-X vector
10997 * only deals with link interrupts, etc, so we add
10998 * one to the number of vectors we are requesting.
10999 */
11000 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11001 }
11002
11003 return irq_cnt;
11004 }
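/* Example: with four default RSS queues, one tx queue and
* irq_max >= 5, max(4, 1) = 4 > 1, so one extra vector is reserved
* for link events and tg3_irq_count() returns min(4 + 1, irq_max) = 5.
*/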
11005
11006 static bool tg3_enable_msix(struct tg3 *tp)
11007 {
11008 int i, rc;
11009 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11010
11011 tp->txq_cnt = tp->txq_req;
11012 tp->rxq_cnt = tp->rxq_req;
11013 if (!tp->rxq_cnt)
11014 tp->rxq_cnt = netif_get_num_default_rss_queues();
11015 if (tp->rxq_cnt > tp->rxq_max)
11016 tp->rxq_cnt = tp->rxq_max;
11017
11018 /* Disable multiple TX rings by default. Simple round-robin hardware
11019 * scheduling of the TX rings can cause starvation of rings with
11020 * small packets when other rings have TSO or jumbo packets.
11021 */
11022 if (!tp->txq_req)
11023 tp->txq_cnt = 1;
11024
11025 tp->irq_cnt = tg3_irq_count(tp);
11026
11027 for (i = 0; i < tp->irq_max; i++) {
11028 msix_ent[i].entry = i;
11029 msix_ent[i].vector = 0;
11030 }
11031
11032 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
11033 if (rc < 0) {
11034 return false;
11035 } else if (rc != 0) {
11036 if (pci_enable_msix(tp->pdev, msix_ent, rc))
11037 return false;
11038 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11039 tp->irq_cnt, rc);
11040 tp->irq_cnt = rc;
11041 tp->rxq_cnt = max(rc - 1, 1);
11042 if (tp->txq_cnt)
11043 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11044 }
11045
11046 for (i = 0; i < tp->irq_max; i++)
11047 tp->napi[i].irq_vec = msix_ent[i].vector;
11048
11049 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11050 pci_disable_msix(tp->pdev);
11051 return false;
11052 }
11053
11054 if (tp->irq_cnt == 1)
11055 return true;
11056
11057 tg3_flag_set(tp, ENABLE_RSS);
11058
11059 if (tp->txq_cnt > 1)
11060 tg3_flag_set(tp, ENABLE_TSS);
11061
11062 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11063
11064 return true;
11065 }
11066
11067 static void tg3_ints_init(struct tg3 *tp)
11068 {
11069 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11070 !tg3_flag(tp, TAGGED_STATUS)) {
/* All MSI-supporting chips should support tagged
11072 * status. Assert that this is the case.
11073 */
11074 netdev_warn(tp->dev,
11075 "MSI without TAGGED_STATUS? Not using MSI\n");
11076 goto defcfg;
11077 }
11078
11079 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11080 tg3_flag_set(tp, USING_MSIX);
11081 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11082 tg3_flag_set(tp, USING_MSI);
11083
11084 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11085 u32 msi_mode = tr32(MSGINT_MODE);
11086 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11087 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11088 if (!tg3_flag(tp, 1SHOT_MSI))
11089 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11090 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11091 }
11092 defcfg:
11093 if (!tg3_flag(tp, USING_MSIX)) {
11094 tp->irq_cnt = 1;
11095 tp->napi[0].irq_vec = tp->pdev->irq;
11096 }
11097
11098 if (tp->irq_cnt == 1) {
11099 tp->txq_cnt = 1;
11100 tp->rxq_cnt = 1;
11101 netif_set_real_num_tx_queues(tp->dev, 1);
11102 netif_set_real_num_rx_queues(tp->dev, 1);
11103 }
11104 }
11105
11106 static void tg3_ints_fini(struct tg3 *tp)
11107 {
11108 if (tg3_flag(tp, USING_MSIX))
11109 pci_disable_msix(tp->pdev);
11110 else if (tg3_flag(tp, USING_MSI))
11111 pci_disable_msi(tp->pdev);
11112 tg3_flag_clear(tp, USING_MSI);
11113 tg3_flag_clear(tp, USING_MSIX);
11114 tg3_flag_clear(tp, ENABLE_RSS);
11115 tg3_flag_clear(tp, ENABLE_TSS);
11116 }
11117
11118 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11119 bool init)
11120 {
11121 struct net_device *dev = tp->dev;
11122 int i, err;
11123
11124 /*
* Set up interrupts first so we know how
* many NAPI resources to allocate.
11127 */
11128 tg3_ints_init(tp);
11129
11130 tg3_rss_check_indir_tbl(tp);
11131
11132 /* The placement of this call is tied
11133 * to the setup and use of Host TX descriptors.
11134 */
11135 err = tg3_alloc_consistent(tp);
11136 if (err)
11137 goto err_out1;
11138
11139 tg3_napi_init(tp);
11140
11141 tg3_napi_enable(tp);
11142
11143 for (i = 0; i < tp->irq_cnt; i++) {
11144 struct tg3_napi *tnapi = &tp->napi[i];
11145 err = tg3_request_irq(tp, i);
11146 if (err) {
11147 for (i--; i >= 0; i--) {
11148 tnapi = &tp->napi[i];
11149 free_irq(tnapi->irq_vec, tnapi);
11150 }
11151 goto err_out2;
11152 }
11153 }
11154
11155 tg3_full_lock(tp, 0);
11156
11157 err = tg3_init_hw(tp, reset_phy);
11158 if (err) {
11159 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11160 tg3_free_rings(tp);
11161 }
11162
11163 tg3_full_unlock(tp);
11164
11165 if (err)
11166 goto err_out3;
11167
11168 if (test_irq && tg3_flag(tp, USING_MSI)) {
11169 err = tg3_test_msi(tp);
11170
11171 if (err) {
11172 tg3_full_lock(tp, 0);
11173 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11174 tg3_free_rings(tp);
11175 tg3_full_unlock(tp);
11176
11177 goto err_out2;
11178 }
11179
11180 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11181 u32 val = tr32(PCIE_TRANSACTION_CFG);
11182
11183 tw32(PCIE_TRANSACTION_CFG,
11184 val | PCIE_TRANS_CFG_1SHOT_MSI);
11185 }
11186 }
11187
11188 tg3_phy_start(tp);
11189
11190 tg3_hwmon_open(tp);
11191
11192 tg3_full_lock(tp, 0);
11193
11194 tg3_timer_start(tp);
11195 tg3_flag_set(tp, INIT_COMPLETE);
11196 tg3_enable_ints(tp);
11197
11198 if (init)
11199 tg3_ptp_init(tp);
11200 else
11201 tg3_ptp_resume(tp);
11202
11204 tg3_full_unlock(tp);
11205
11206 netif_tx_start_all_queues(dev);
11207
11208 /*
* Reset the loopback feature if it was turned on while the device was
* down; make sure that it's configured properly now.
11211 */
11212 if (dev->features & NETIF_F_LOOPBACK)
11213 tg3_set_loopback(dev, dev->features);
11214
11215 return 0;
11216
11217 err_out3:
11218 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11219 struct tg3_napi *tnapi = &tp->napi[i];
11220 free_irq(tnapi->irq_vec, tnapi);
11221 }
11222
11223 err_out2:
11224 tg3_napi_disable(tp);
11225 tg3_napi_fini(tp);
11226 tg3_free_consistent(tp);
11227
11228 err_out1:
11229 tg3_ints_fini(tp);
11230
11231 return err;
11232 }
11233
11234 static void tg3_stop(struct tg3 *tp)
11235 {
11236 int i;
11237
11238 tg3_reset_task_cancel(tp);
11239 tg3_netif_stop(tp);
11240
11241 tg3_timer_stop(tp);
11242
11243 tg3_hwmon_close(tp);
11244
11245 tg3_phy_stop(tp);
11246
11247 tg3_full_lock(tp, 1);
11248
11249 tg3_disable_ints(tp);
11250
11251 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11252 tg3_free_rings(tp);
11253 tg3_flag_clear(tp, INIT_COMPLETE);
11254
11255 tg3_full_unlock(tp);
11256
11257 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11258 struct tg3_napi *tnapi = &tp->napi[i];
11259 free_irq(tnapi->irq_vec, tnapi);
11260 }
11261
11262 tg3_ints_fini(tp);
11263
11264 tg3_napi_fini(tp);
11265
11266 tg3_free_consistent(tp);
11267 }
11268
11269 static int tg3_open(struct net_device *dev)
11270 {
11271 struct tg3 *tp = netdev_priv(dev);
11272 int err;
11273
11274 if (tp->fw_needed) {
11275 err = tg3_request_firmware(tp);
11276 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11277 if (err) {
11278 netdev_warn(tp->dev, "EEE capability disabled\n");
11279 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11280 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11281 netdev_warn(tp->dev, "EEE capability restored\n");
11282 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11283 }
11284 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11285 if (err)
11286 return err;
11287 } else if (err) {
11288 netdev_warn(tp->dev, "TSO capability disabled\n");
11289 tg3_flag_clear(tp, TSO_CAPABLE);
11290 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11291 netdev_notice(tp->dev, "TSO capability restored\n");
11292 tg3_flag_set(tp, TSO_CAPABLE);
11293 }
11294 }
11295
11296 tg3_carrier_off(tp);
11297
11298 err = tg3_power_up(tp);
11299 if (err)
11300 return err;
11301
11302 tg3_full_lock(tp, 0);
11303
11304 tg3_disable_ints(tp);
11305 tg3_flag_clear(tp, INIT_COMPLETE);
11306
11307 tg3_full_unlock(tp);
11308
11309 err = tg3_start(tp,
11310 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11311 true, true);
11312 if (err) {
11313 tg3_frob_aux_power(tp, false);
11314 pci_set_power_state(tp->pdev, PCI_D3hot);
11315 }
11316
11317 if (tg3_flag(tp, PTP_CAPABLE)) {
11318 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
11319 &tp->pdev->dev);
11320 if (IS_ERR(tp->ptp_clock))
11321 tp->ptp_clock = NULL;
11322 }
11323
11324 return err;
11325 }
11326
11327 static int tg3_close(struct net_device *dev)
11328 {
11329 struct tg3 *tp = netdev_priv(dev);
11330
11331 tg3_ptp_fini(tp);
11332
11333 tg3_stop(tp);
11334
11335 /* Clear stats across close / open calls */
11336 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11337 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11338
11339 tg3_power_down(tp);
11340
11341 tg3_carrier_off(tp);
11342
11343 return 0;
11344 }
11345
11346 static inline u64 get_stat64(tg3_stat64_t *val)
11347 {
11348 return ((u64)val->high << 32) | ((u64)val->low);
11349 }
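/* get_stat64() simply reassembles the {high, low} pair maintained by
* TG3_STAT_ADD32() above; e.g. high = 0x1, low = 0x2 yields
* 0x0000000100000002.
*/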
11350
11351 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11352 {
11353 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11354
11355 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11356 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11357 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11358 u32 val;
11359
11360 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11361 tg3_writephy(tp, MII_TG3_TEST1,
11362 val | MII_TG3_TEST1_CRC_EN);
11363 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11364 } else
11365 val = 0;
11366
11367 tp->phy_crc_errors += val;
11368
11369 return tp->phy_crc_errors;
11370 }
11371
11372 return get_stat64(&hw_stats->rx_fcs_errors);
11373 }
11374
11375 #define ESTAT_ADD(member) \
11376 estats->member = old_estats->member + \
11377 get_stat64(&hw_stats->member)
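/* For reference, ESTAT_ADD(rx_octets) below expands to:
*
*   estats->rx_octets = old_estats->rx_octets +
*                       get_stat64(&hw_stats->rx_octets);
*
* i.e. each ethtool counter is the pre-reset total plus the value
* accumulated in the current hardware statistics block.
*/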
11378
11379 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11380 {
11381 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11382 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11383
11384 ESTAT_ADD(rx_octets);
11385 ESTAT_ADD(rx_fragments);
11386 ESTAT_ADD(rx_ucast_packets);
11387 ESTAT_ADD(rx_mcast_packets);
11388 ESTAT_ADD(rx_bcast_packets);
11389 ESTAT_ADD(rx_fcs_errors);
11390 ESTAT_ADD(rx_align_errors);
11391 ESTAT_ADD(rx_xon_pause_rcvd);
11392 ESTAT_ADD(rx_xoff_pause_rcvd);
11393 ESTAT_ADD(rx_mac_ctrl_rcvd);
11394 ESTAT_ADD(rx_xoff_entered);
11395 ESTAT_ADD(rx_frame_too_long_errors);
11396 ESTAT_ADD(rx_jabbers);
11397 ESTAT_ADD(rx_undersize_packets);
11398 ESTAT_ADD(rx_in_length_errors);
11399 ESTAT_ADD(rx_out_length_errors);
11400 ESTAT_ADD(rx_64_or_less_octet_packets);
11401 ESTAT_ADD(rx_65_to_127_octet_packets);
11402 ESTAT_ADD(rx_128_to_255_octet_packets);
11403 ESTAT_ADD(rx_256_to_511_octet_packets);
11404 ESTAT_ADD(rx_512_to_1023_octet_packets);
11405 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11406 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11407 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11408 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11409 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11410
11411 ESTAT_ADD(tx_octets);
11412 ESTAT_ADD(tx_collisions);
11413 ESTAT_ADD(tx_xon_sent);
11414 ESTAT_ADD(tx_xoff_sent);
11415 ESTAT_ADD(tx_flow_control);
11416 ESTAT_ADD(tx_mac_errors);
11417 ESTAT_ADD(tx_single_collisions);
11418 ESTAT_ADD(tx_mult_collisions);
11419 ESTAT_ADD(tx_deferred);
11420 ESTAT_ADD(tx_excessive_collisions);
11421 ESTAT_ADD(tx_late_collisions);
11422 ESTAT_ADD(tx_collide_2times);
11423 ESTAT_ADD(tx_collide_3times);
11424 ESTAT_ADD(tx_collide_4times);
11425 ESTAT_ADD(tx_collide_5times);
11426 ESTAT_ADD(tx_collide_6times);
11427 ESTAT_ADD(tx_collide_7times);
11428 ESTAT_ADD(tx_collide_8times);
11429 ESTAT_ADD(tx_collide_9times);
11430 ESTAT_ADD(tx_collide_10times);
11431 ESTAT_ADD(tx_collide_11times);
11432 ESTAT_ADD(tx_collide_12times);
11433 ESTAT_ADD(tx_collide_13times);
11434 ESTAT_ADD(tx_collide_14times);
11435 ESTAT_ADD(tx_collide_15times);
11436 ESTAT_ADD(tx_ucast_packets);
11437 ESTAT_ADD(tx_mcast_packets);
11438 ESTAT_ADD(tx_bcast_packets);
11439 ESTAT_ADD(tx_carrier_sense_errors);
11440 ESTAT_ADD(tx_discards);
11441 ESTAT_ADD(tx_errors);
11442
11443 ESTAT_ADD(dma_writeq_full);
11444 ESTAT_ADD(dma_write_prioq_full);
11445 ESTAT_ADD(rxbds_empty);
11446 ESTAT_ADD(rx_discards);
11447 ESTAT_ADD(rx_errors);
11448 ESTAT_ADD(rx_threshold_hit);
11449
11450 ESTAT_ADD(dma_readq_full);
11451 ESTAT_ADD(dma_read_prioq_full);
11452 ESTAT_ADD(tx_comp_queue_full);
11453
11454 ESTAT_ADD(ring_set_send_prod_index);
11455 ESTAT_ADD(ring_status_update);
11456 ESTAT_ADD(nic_irqs);
11457 ESTAT_ADD(nic_avoided_irqs);
11458 ESTAT_ADD(nic_tx_threshold_hit);
11459
11460 ESTAT_ADD(mbuf_lwm_thresh_hit);
11461 }
11462
11463 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11464 {
11465 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11466 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11467
11468 stats->rx_packets = old_stats->rx_packets +
11469 get_stat64(&hw_stats->rx_ucast_packets) +
11470 get_stat64(&hw_stats->rx_mcast_packets) +
11471 get_stat64(&hw_stats->rx_bcast_packets);
11472
11473 stats->tx_packets = old_stats->tx_packets +
11474 get_stat64(&hw_stats->tx_ucast_packets) +
11475 get_stat64(&hw_stats->tx_mcast_packets) +
11476 get_stat64(&hw_stats->tx_bcast_packets);
11477
11478 stats->rx_bytes = old_stats->rx_bytes +
11479 get_stat64(&hw_stats->rx_octets);
11480 stats->tx_bytes = old_stats->tx_bytes +
11481 get_stat64(&hw_stats->tx_octets);
11482
11483 stats->rx_errors = old_stats->rx_errors +
11484 get_stat64(&hw_stats->rx_errors);
11485 stats->tx_errors = old_stats->tx_errors +
11486 get_stat64(&hw_stats->tx_errors) +
11487 get_stat64(&hw_stats->tx_mac_errors) +
11488 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11489 get_stat64(&hw_stats->tx_discards);
11490
11491 stats->multicast = old_stats->multicast +
11492 get_stat64(&hw_stats->rx_mcast_packets);
11493 stats->collisions = old_stats->collisions +
11494 get_stat64(&hw_stats->tx_collisions);
11495
11496 stats->rx_length_errors = old_stats->rx_length_errors +
11497 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11498 get_stat64(&hw_stats->rx_undersize_packets);
11499
11500 stats->rx_over_errors = old_stats->rx_over_errors +
11501 get_stat64(&hw_stats->rxbds_empty);
11502 stats->rx_frame_errors = old_stats->rx_frame_errors +
11503 get_stat64(&hw_stats->rx_align_errors);
11504 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11505 get_stat64(&hw_stats->tx_discards);
11506 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11507 get_stat64(&hw_stats->tx_carrier_sense_errors);
11508
11509 stats->rx_crc_errors = old_stats->rx_crc_errors +
11510 tg3_calc_crc_errors(tp);
11511
11512 stats->rx_missed_errors = old_stats->rx_missed_errors +
11513 get_stat64(&hw_stats->rx_discards);
11514
11515 stats->rx_dropped = tp->rx_dropped;
11516 stats->tx_dropped = tp->tx_dropped;
11517 }
11518
11519 static int tg3_get_regs_len(struct net_device *dev)
11520 {
11521 return TG3_REG_BLK_SIZE;
11522 }
11523
11524 static void tg3_get_regs(struct net_device *dev,
11525 struct ethtool_regs *regs, void *_p)
11526 {
11527 struct tg3 *tp = netdev_priv(dev);
11528
11529 regs->version = 0;
11530
11531 memset(_p, 0, TG3_REG_BLK_SIZE);
11532
11533 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11534 return;
11535
11536 tg3_full_lock(tp, 0);
11537
11538 tg3_dump_legacy_regs(tp, (u32 *)_p);
11539
11540 tg3_full_unlock(tp);
11541 }
11542
11543 static int tg3_get_eeprom_len(struct net_device *dev)
11544 {
11545 struct tg3 *tp = netdev_priv(dev);
11546
11547 return tp->nvram_size;
11548 }
11549
11550 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11551 {
11552 struct tg3 *tp = netdev_priv(dev);
11553 int ret;
11554 u8 *pd;
11555 u32 i, offset, len, b_offset, b_count;
11556 __be32 val;
11557
11558 if (tg3_flag(tp, NO_NVRAM))
11559 return -EINVAL;
11560
11561 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11562 return -EAGAIN;
11563
11564 offset = eeprom->offset;
11565 len = eeprom->len;
11566 eeprom->len = 0;
11567
11568 eeprom->magic = TG3_EEPROM_MAGIC;
11569
11570 if (offset & 3) {
11571 /* adjustments to start on required 4 byte boundary */
11572 b_offset = offset & 3;
11573 b_count = 4 - b_offset;
11574 if (b_count > len) {
11575 /* i.e. offset=1 len=2 */
11576 b_count = len;
11577 }
11578 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11579 if (ret)
11580 return ret;
11581 memcpy(data, ((char *)&val) + b_offset, b_count);
11582 len -= b_count;
11583 offset += b_count;
11584 eeprom->len += b_count;
11585 }
11586
11587 /* read bytes up to the last 4 byte boundary */
11588 pd = &data[eeprom->len];
11589 for (i = 0; i < (len - (len & 3)); i += 4) {
11590 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11591 if (ret) {
11592 eeprom->len += i;
11593 return ret;
11594 }
11595 memcpy(pd + i, &val, 4);
11596 }
11597 eeprom->len += i;
11598
11599 if (len & 3) {
11600 /* read last bytes not ending on 4 byte boundary */
11601 pd = &data[eeprom->len];
11602 b_count = len & 3;
11603 b_offset = offset + len - b_count;
11604 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11605 if (ret)
11606 return ret;
11607 memcpy(pd, &val, b_count);
11608 eeprom->len += b_count;
11609 }
11610 return 0;
11611 }
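/* A worked example of the alignment handling above: a request with
* offset = 1 and len = 10 (bytes 1..10) is served by three aligned
* 4-byte NVRAM reads:
*
*   1. word at offset 0:  copy its last 3 bytes   (bytes 1..3)
*   2. word at offset 4:  copy all 4 bytes        (bytes 4..7)
*   3. word at offset 8:  copy its first 3 bytes  (bytes 8..10)
*/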
11612
11613 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11614 {
11615 struct tg3 *tp = netdev_priv(dev);
11616 int ret;
11617 u32 offset, len, b_offset, odd_len;
11618 u8 *buf;
11619 __be32 start, end;
11620
11621 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11622 return -EAGAIN;
11623
11624 if (tg3_flag(tp, NO_NVRAM) ||
11625 eeprom->magic != TG3_EEPROM_MAGIC)
11626 return -EINVAL;
11627
11628 offset = eeprom->offset;
11629 len = eeprom->len;
11630
11631 if ((b_offset = (offset & 3))) {
11632 /* adjustments to start on required 4 byte boundary */
11633 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11634 if (ret)
11635 return ret;
11636 len += b_offset;
11637 offset &= ~3;
11638 if (len < 4)
11639 len = 4;
11640 }
11641
11642 odd_len = 0;
11643 if (len & 3) {
11644 /* adjustments to end on required 4 byte boundary */
11645 odd_len = 1;
11646 len = (len + 3) & ~3;
11647 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11648 if (ret)
11649 return ret;
11650 }
11651
11652 buf = data;
11653 if (b_offset || odd_len) {
11654 buf = kmalloc(len, GFP_KERNEL);
11655 if (!buf)
11656 return -ENOMEM;
11657 if (b_offset)
11658 memcpy(buf, &start, 4);
11659 if (odd_len)
11660 memcpy(buf+len-4, &end, 4);
11661 memcpy(buf + b_offset, data, eeprom->len);
11662 }
11663
11664 ret = tg3_nvram_write_block(tp, offset, len, buf);
11665
11666 if (buf != data)
11667 kfree(buf);
11668
11669 return ret;
11670 }
11671
11672 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11673 {
11674 struct tg3 *tp = netdev_priv(dev);
11675
11676 if (tg3_flag(tp, USE_PHYLIB)) {
11677 struct phy_device *phydev;
11678 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11679 return -EAGAIN;
11680 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11681 return phy_ethtool_gset(phydev, cmd);
11682 }
11683
11684 cmd->supported = (SUPPORTED_Autoneg);
11685
11686 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11687 cmd->supported |= (SUPPORTED_1000baseT_Half |
11688 SUPPORTED_1000baseT_Full);
11689
11690 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11691 cmd->supported |= (SUPPORTED_100baseT_Half |
11692 SUPPORTED_100baseT_Full |
11693 SUPPORTED_10baseT_Half |
11694 SUPPORTED_10baseT_Full |
11695 SUPPORTED_TP);
11696 cmd->port = PORT_TP;
11697 } else {
11698 cmd->supported |= SUPPORTED_FIBRE;
11699 cmd->port = PORT_FIBRE;
11700 }
11701
11702 cmd->advertising = tp->link_config.advertising;
11703 if (tg3_flag(tp, PAUSE_AUTONEG)) {
11704 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11705 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11706 cmd->advertising |= ADVERTISED_Pause;
11707 } else {
11708 cmd->advertising |= ADVERTISED_Pause |
11709 ADVERTISED_Asym_Pause;
11710 }
11711 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11712 cmd->advertising |= ADVERTISED_Asym_Pause;
11713 }
11714 }
11715 if (netif_running(dev) && tp->link_up) {
11716 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11717 cmd->duplex = tp->link_config.active_duplex;
11718 cmd->lp_advertising = tp->link_config.rmt_adv;
11719 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11720 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11721 cmd->eth_tp_mdix = ETH_TP_MDI_X;
11722 else
11723 cmd->eth_tp_mdix = ETH_TP_MDI;
11724 }
11725 } else {
11726 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11727 cmd->duplex = DUPLEX_UNKNOWN;
11728 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11729 }
11730 cmd->phy_address = tp->phy_addr;
11731 cmd->transceiver = XCVR_INTERNAL;
11732 cmd->autoneg = tp->link_config.autoneg;
11733 cmd->maxtxpkt = 0;
11734 cmd->maxrxpkt = 0;
11735 return 0;
11736 }
11737
11738 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11739 {
11740 struct tg3 *tp = netdev_priv(dev);
11741 u32 speed = ethtool_cmd_speed(cmd);
11742
11743 if (tg3_flag(tp, USE_PHYLIB)) {
11744 struct phy_device *phydev;
11745 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11746 return -EAGAIN;
11747 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11748 return phy_ethtool_sset(phydev, cmd);
11749 }
11750
11751 if (cmd->autoneg != AUTONEG_ENABLE &&
11752 cmd->autoneg != AUTONEG_DISABLE)
11753 return -EINVAL;
11754
11755 if (cmd->autoneg == AUTONEG_DISABLE &&
11756 cmd->duplex != DUPLEX_FULL &&
11757 cmd->duplex != DUPLEX_HALF)
11758 return -EINVAL;
11759
11760 if (cmd->autoneg == AUTONEG_ENABLE) {
11761 u32 mask = ADVERTISED_Autoneg |
11762 ADVERTISED_Pause |
11763 ADVERTISED_Asym_Pause;
11764
11765 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11766 mask |= ADVERTISED_1000baseT_Half |
11767 ADVERTISED_1000baseT_Full;
11768
11769 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11770 mask |= ADVERTISED_100baseT_Half |
11771 ADVERTISED_100baseT_Full |
11772 ADVERTISED_10baseT_Half |
11773 ADVERTISED_10baseT_Full |
11774 ADVERTISED_TP;
11775 else
11776 mask |= ADVERTISED_FIBRE;
11777
11778 if (cmd->advertising & ~mask)
11779 return -EINVAL;
11780
11781 mask &= (ADVERTISED_1000baseT_Half |
11782 ADVERTISED_1000baseT_Full |
11783 ADVERTISED_100baseT_Half |
11784 ADVERTISED_100baseT_Full |
11785 ADVERTISED_10baseT_Half |
11786 ADVERTISED_10baseT_Full);
11787
11788 cmd->advertising &= mask;
11789 } else {
11790 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
11791 if (speed != SPEED_1000)
11792 return -EINVAL;
11793
11794 if (cmd->duplex != DUPLEX_FULL)
11795 return -EINVAL;
11796 } else {
11797 if (speed != SPEED_100 &&
11798 speed != SPEED_10)
11799 return -EINVAL;
11800 }
11801 }
11802
11803 tg3_full_lock(tp, 0);
11804
11805 tp->link_config.autoneg = cmd->autoneg;
11806 if (cmd->autoneg == AUTONEG_ENABLE) {
11807 tp->link_config.advertising = (cmd->advertising |
11808 ADVERTISED_Autoneg);
11809 tp->link_config.speed = SPEED_UNKNOWN;
11810 tp->link_config.duplex = DUPLEX_UNKNOWN;
11811 } else {
11812 tp->link_config.advertising = 0;
11813 tp->link_config.speed = speed;
11814 tp->link_config.duplex = cmd->duplex;
11815 }
11816
11817 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
11818
11819 tg3_warn_mgmt_link_flap(tp);
11820
11821 if (netif_running(dev))
11822 tg3_setup_phy(tp, true);
11823
11824 tg3_full_unlock(tp);
11825
11826 return 0;
11827 }
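/* Example (illustrative, not part of the driver): this handler is
 * reached from userspace via ethtool, e.g.
 *
 *   ethtool -s eth0 autoneg on advertise 0x0f   # all 10/100 modes
 *   ethtool -s eth0 speed 100 duplex full autoneg off
 *
 * Note that a forced mode on a SERDES port is only accepted as
 * 1000/full, per the checks above.
 */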
11828
11829 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11830 {
11831 struct tg3 *tp = netdev_priv(dev);
11832
11833 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11834 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11835 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11836 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11837 }
11838
11839 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11840 {
11841 struct tg3 *tp = netdev_priv(dev);
11842
11843 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11844 wol->supported = WAKE_MAGIC;
11845 else
11846 wol->supported = 0;
11847 wol->wolopts = 0;
11848 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11849 wol->wolopts = WAKE_MAGIC;
11850 memset(&wol->sopass, 0, sizeof(wol->sopass));
11851 }
11852
11853 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11854 {
11855 struct tg3 *tp = netdev_priv(dev);
11856 struct device *dp = &tp->pdev->dev;
11857
11858 if (wol->wolopts & ~WAKE_MAGIC)
11859 return -EINVAL;
11860 if ((wol->wolopts & WAKE_MAGIC) &&
11861 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11862 return -EINVAL;
11863
11864 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11865
11866 spin_lock_bh(&tp->lock);
11867 if (device_may_wakeup(dp))
11868 tg3_flag_set(tp, WOL_ENABLE);
11869 else
11870 tg3_flag_clear(tp, WOL_ENABLE);
11871 spin_unlock_bh(&tp->lock);
11872
11873 return 0;
11874 }
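/* Example (illustrative): only magic-packet wake is supported, so
 *
 *   ethtool -s eth0 wol g    # enable wake-on-magic-packet
 *   ethtool -s eth0 wol d    # disable
 *
 * Any other WAKE_* bit is rejected with -EINVAL above.
 */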
11875
11876 static u32 tg3_get_msglevel(struct net_device *dev)
11877 {
11878 struct tg3 *tp = netdev_priv(dev);
11879 return tp->msg_enable;
11880 }
11881
11882 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11883 {
11884 struct tg3 *tp = netdev_priv(dev);
11885 tp->msg_enable = value;
11886 }
11887
11888 static int tg3_nway_reset(struct net_device *dev)
11889 {
11890 struct tg3 *tp = netdev_priv(dev);
11891 int r;
11892
11893 if (!netif_running(dev))
11894 return -EAGAIN;
11895
11896 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11897 return -EINVAL;
11898
11899 tg3_warn_mgmt_link_flap(tp);
11900
11901 if (tg3_flag(tp, USE_PHYLIB)) {
11902 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11903 return -EAGAIN;
11904 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
11905 } else {
11906 u32 bmcr;
11907
11908 spin_lock_bh(&tp->lock);
11909 r = -EINVAL;
/* BMCR is deliberately read twice: the first result is discarded
 * (a dummy read, presumably to flush a stale value) and only the
 * second read is used.
 */
11910 tg3_readphy(tp, MII_BMCR, &bmcr);
11911 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
11912 ((bmcr & BMCR_ANENABLE) ||
11913 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
11914 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
11915 BMCR_ANENABLE);
11916 r = 0;
11917 }
11918 spin_unlock_bh(&tp->lock);
11919 }
11920
11921 return r;
11922 }
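/* Example (illustrative): autonegotiation is restarted from
 * userspace with
 *
 *   ethtool -r eth0
 *
 * which simply sets BMCR_ANRESTART (plus BMCR_ANENABLE) in the
 * PHY's control register via the path above.
 */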
11923
11924 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11925 {
11926 struct tg3 *tp = netdev_priv(dev);
11927
11928 ering->rx_max_pending = tp->rx_std_ring_mask;
11929 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11930 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11931 else
11932 ering->rx_jumbo_max_pending = 0;
11933
11934 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11935
11936 ering->rx_pending = tp->rx_pending;
11937 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11938 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11939 else
11940 ering->rx_jumbo_pending = 0;
11941
11942 ering->tx_pending = tp->napi[0].tx_pending;
11943 }
11944
11945 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11946 {
11947 struct tg3 *tp = netdev_priv(dev);
11948 int i, irq_sync = 0, err = 0;
11949
11950 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
11951 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
11952 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11953 (ering->tx_pending <= MAX_SKB_FRAGS) ||
11954 (tg3_flag(tp, TSO_BUG) &&
11955 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
11956 return -EINVAL;
11957
11958 if (netif_running(dev)) {
11959 tg3_phy_stop(tp);
11960 tg3_netif_stop(tp);
11961 irq_sync = 1;
11962 }
11963
11964 tg3_full_lock(tp, irq_sync);
11965
11966 tp->rx_pending = ering->rx_pending;
11967
11968 if (tg3_flag(tp, MAX_RXPEND_64) &&
11969 tp->rx_pending > 63)
11970 tp->rx_pending = 63;
11971 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
11972
11973 for (i = 0; i < tp->irq_max; i++)
11974 tp->napi[i].tx_pending = ering->tx_pending;
11975
11976 if (netif_running(dev)) {
11977 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11978 err = tg3_restart_hw(tp, false);
11979 if (!err)
11980 tg3_netif_start(tp);
11981 }
11982
11983 tg3_full_unlock(tp);
11984
11985 if (irq_sync && !err)
11986 tg3_phy_start(tp);
11987
11988 return err;
11989 }
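/* Example (illustrative): ring sizes are tuned with
 *
 *   ethtool -G eth0 rx 511 tx 511
 *
 * subject to the checks above: rx may not exceed the ring mask (and
 * is clamped to 63 on MAX_RXPEND_64 parts), tx must exceed
 * MAX_SKB_FRAGS (3 * MAX_SKB_FRAGS on TSO_BUG parts), and a running
 * device is halted and restarted for the new sizes to take effect.
 */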
11990
11991 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11992 {
11993 struct tg3 *tp = netdev_priv(dev);
11994
11995 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11996
11997 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11998 epause->rx_pause = 1;
11999 else
12000 epause->rx_pause = 0;
12001
12002 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12003 epause->tx_pause = 1;
12004 else
12005 epause->tx_pause = 0;
12006 }
12007
12008 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12009 {
12010 struct tg3 *tp = netdev_priv(dev);
12011 int err = 0;
12012
12013 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12014 tg3_warn_mgmt_link_flap(tp);
12015
12016 if (tg3_flag(tp, USE_PHYLIB)) {
12017 u32 newadv;
12018 struct phy_device *phydev;
12019
12020 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12021
12022 if (!(phydev->supported & SUPPORTED_Pause) ||
12023 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12024 (epause->rx_pause != epause->tx_pause)))
12025 return -EINVAL;
12026
12027 tp->link_config.flowctrl = 0;
12028 if (epause->rx_pause) {
12029 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12030
12031 if (epause->tx_pause) {
12032 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12033 newadv = ADVERTISED_Pause;
12034 } else
12035 newadv = ADVERTISED_Pause |
12036 ADVERTISED_Asym_Pause;
12037 } else if (epause->tx_pause) {
12038 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12039 newadv = ADVERTISED_Asym_Pause;
12040 } else
12041 newadv = 0;
12042
12043 if (epause->autoneg)
12044 tg3_flag_set(tp, PAUSE_AUTONEG);
12045 else
12046 tg3_flag_clear(tp, PAUSE_AUTONEG);
12047
12048 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12049 u32 oldadv = phydev->advertising &
12050 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12051 if (oldadv != newadv) {
12052 phydev->advertising &=
12053 ~(ADVERTISED_Pause |
12054 ADVERTISED_Asym_Pause);
12055 phydev->advertising |= newadv;
12056 if (phydev->autoneg) {
12057 /*
12058 * Always renegotiate the link to
12059 * inform our link partner of our
12060 * flow control settings, even if the
12061 * flow control is forced. Let
12062 * tg3_adjust_link() do the final
12063 * flow control setup.
12064 */
12065 return phy_start_aneg(phydev);
12066 }
12067 }
12068
12069 if (!epause->autoneg)
12070 tg3_setup_flow_control(tp, 0, 0);
12071 } else {
12072 tp->link_config.advertising &=
12073 ~(ADVERTISED_Pause |
12074 ADVERTISED_Asym_Pause);
12075 tp->link_config.advertising |= newadv;
12076 }
12077 } else {
12078 int irq_sync = 0;
12079
12080 if (netif_running(dev)) {
12081 tg3_netif_stop(tp);
12082 irq_sync = 1;
12083 }
12084
12085 tg3_full_lock(tp, irq_sync);
12086
12087 if (epause->autoneg)
12088 tg3_flag_set(tp, PAUSE_AUTONEG);
12089 else
12090 tg3_flag_clear(tp, PAUSE_AUTONEG);
12091 if (epause->rx_pause)
12092 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12093 else
12094 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12095 if (epause->tx_pause)
12096 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12097 else
12098 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12099
12100 if (netif_running(dev)) {
12101 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12102 err = tg3_restart_hw(tp, false);
12103 if (!err)
12104 tg3_netif_start(tp);
12105 }
12106
12107 tg3_full_unlock(tp);
12108 }
12109
12110 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12111
12112 return err;
12113 }
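/* Example (illustrative): flow control is configured with
 *
 *   ethtool -A eth0 autoneg on rx on tx on
 *
 * With pause autoneg enabled the request is folded into the
 * advertised pause bits (see the mapping noted after
 * tg3_get_settings()); with it disabled the forced settings take
 * effect on the next flow-control setup or hardware restart.
 */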
12114
12115 static int tg3_get_sset_count(struct net_device *dev, int sset)
12116 {
12117 switch (sset) {
12118 case ETH_SS_TEST:
12119 return TG3_NUM_TEST;
12120 case ETH_SS_STATS:
12121 return TG3_NUM_STATS;
12122 default:
12123 return -EOPNOTSUPP;
12124 }
12125 }
12126
12127 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12128 u32 *rules __always_unused)
12129 {
12130 struct tg3 *tp = netdev_priv(dev);
12131
12132 if (!tg3_flag(tp, SUPPORT_MSIX))
12133 return -EOPNOTSUPP;
12134
12135 switch (info->cmd) {
12136 case ETHTOOL_GRXRINGS:
12137 if (netif_running(tp->dev))
12138 info->data = tp->rxq_cnt;
12139 else {
12140 info->data = num_online_cpus();
12141 if (info->data > TG3_RSS_MAX_NUM_QS)
12142 info->data = TG3_RSS_MAX_NUM_QS;
12143 }
12144
12145 /* The first interrupt vector only
12146 * handles link interrupts.
12147 */
12148 info->data -= 1;
12149 return 0;
12150
12151 default:
12152 return -EOPNOTSUPP;
12153 }
12154 }
12155
12156 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12157 {
12158 u32 size = 0;
12159 struct tg3 *tp = netdev_priv(dev);
12160
12161 if (tg3_flag(tp, SUPPORT_MSIX))
12162 size = TG3_RSS_INDIR_TBL_SIZE;
12163
12164 return size;
12165 }
12166
12167 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
12168 {
12169 struct tg3 *tp = netdev_priv(dev);
12170 int i;
12171
12172 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12173 indir[i] = tp->rss_ind_tbl[i];
12174
12175 return 0;
12176 }
12177
12178 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
12179 {
12180 struct tg3 *tp = netdev_priv(dev);
12181 size_t i;
12182
12183 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12184 tp->rss_ind_tbl[i] = indir[i];
12185
12186 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12187 return 0;
12188
12189 /* It is legal to write the indirection
12190 * table while the device is running.
12191 */
12192 tg3_full_lock(tp, 0);
12193 tg3_rss_write_indir_tbl(tp);
12194 tg3_full_unlock(tp);
12195
12196 return 0;
12197 }
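/* Example (illustrative): the RSS indirection table can be read and
 * rewritten at runtime, e.g.
 *
 *   ethtool -x eth0              # dump the table
 *   ethtool -X eth0 equal 4      # spread flows over 4 rx queues
 *
 * As the comment above notes, the hardware table may be rewritten
 * while the device is running.
 */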
12198
12199 static void tg3_get_channels(struct net_device *dev,
12200 struct ethtool_channels *channel)
12201 {
12202 struct tg3 *tp = netdev_priv(dev);
12203 u32 deflt_qs = netif_get_num_default_rss_queues();
12204
12205 channel->max_rx = tp->rxq_max;
12206 channel->max_tx = tp->txq_max;
12207
12208 if (netif_running(dev)) {
12209 channel->rx_count = tp->rxq_cnt;
12210 channel->tx_count = tp->txq_cnt;
12211 } else {
12212 if (tp->rxq_req)
12213 channel->rx_count = tp->rxq_req;
12214 else
12215 channel->rx_count = min(deflt_qs, tp->rxq_max);
12216
12217 if (tp->txq_req)
12218 channel->tx_count = tp->txq_req;
12219 else
12220 channel->tx_count = min(deflt_qs, tp->txq_max);
12221 }
12222 }
12223
12224 static int tg3_set_channels(struct net_device *dev,
12225 struct ethtool_channels *channel)
12226 {
12227 struct tg3 *tp = netdev_priv(dev);
12228
12229 if (!tg3_flag(tp, SUPPORT_MSIX))
12230 return -EOPNOTSUPP;
12231
12232 if (channel->rx_count > tp->rxq_max ||
12233 channel->tx_count > tp->txq_max)
12234 return -EINVAL;
12235
12236 tp->rxq_req = channel->rx_count;
12237 tp->txq_req = channel->tx_count;
12238
12239 if (!netif_running(dev))
12240 return 0;
12241
12242 tg3_stop(tp);
12243
12244 tg3_carrier_off(tp);
12245
12246 tg3_start(tp, true, false, false);
12247
12248 return 0;
12249 }
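/* Example (illustrative): queue counts are changed with
 *
 *   ethtool -L eth0 rx 4 tx 4
 *
 * Unlike most handlers above, this one does a full tg3_stop()/
 * tg3_start() cycle rather than a halt/restart, since the MSI-X
 * vector and NAPI setup depends on the queue count.
 */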
12250
12251 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12252 {
12253 switch (stringset) {
12254 case ETH_SS_STATS:
12255 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12256 break;
12257 case ETH_SS_TEST:
12258 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12259 break;
12260 default:
12261 WARN_ON(1); /* unknown string set - should not happen */
12262 break;
12263 }
12264 }
12265
12266 static int tg3_set_phys_id(struct net_device *dev,
12267 enum ethtool_phys_id_state state)
12268 {
12269 struct tg3 *tp = netdev_priv(dev);
12270
12271 if (!netif_running(tp->dev))
12272 return -EAGAIN;
12273
12274 switch (state) {
12275 case ETHTOOL_ID_ACTIVE:
12276 return 1; /* cycle on/off once per second */
12277
12278 case ETHTOOL_ID_ON:
12279 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12280 LED_CTRL_1000MBPS_ON |
12281 LED_CTRL_100MBPS_ON |
12282 LED_CTRL_10MBPS_ON |
12283 LED_CTRL_TRAFFIC_OVERRIDE |
12284 LED_CTRL_TRAFFIC_BLINK |
12285 LED_CTRL_TRAFFIC_LED);
12286 break;
12287
12288 case ETHTOOL_ID_OFF:
12289 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12290 LED_CTRL_TRAFFIC_OVERRIDE);
12291 break;
12292
12293 case ETHTOOL_ID_INACTIVE:
12294 tw32(MAC_LED_CTRL, tp->led_ctrl);
12295 break;
12296 }
12297
12298 return 0;
12299 }
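/* Example (illustrative): port identification is triggered with
 *
 *   ethtool -p eth0 5
 *
 * which blinks the link LED for 5 seconds; the ETHTOOL_ID_ACTIVE
 * return value of 1 above asks the core to toggle on/off once per
 * second.
 */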
12300
12301 static void tg3_get_ethtool_stats(struct net_device *dev,
12302 struct ethtool_stats *estats, u64 *tmp_stats)
12303 {
12304 struct tg3 *tp = netdev_priv(dev);
12305
12306 if (tp->hw_stats)
12307 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12308 else
12309 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12310 }
12311
12312 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12313 {
12314 int i;
12315 __be32 *buf;
12316 u32 offset = 0, len = 0;
12317 u32 magic, val;
12318
12319 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12320 return NULL;
12321
12322 if (magic == TG3_EEPROM_MAGIC) {
12323 for (offset = TG3_NVM_DIR_START;
12324 offset < TG3_NVM_DIR_END;
12325 offset += TG3_NVM_DIRENT_SIZE) {
12326 if (tg3_nvram_read(tp, offset, &val))
12327 return NULL;
12328
12329 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12330 TG3_NVM_DIRTYPE_EXTVPD)
12331 break;
12332 }
12333
12334 if (offset != TG3_NVM_DIR_END) {
12335 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12336 if (tg3_nvram_read(tp, offset + 4, &offset))
12337 return NULL;
12338
12339 offset = tg3_nvram_logical_addr(tp, offset);
12340 }
12341 }
12342
12343 if (!offset || !len) {
12344 offset = TG3_NVM_VPD_OFF;
12345 len = TG3_NVM_VPD_LEN;
12346 }
12347
12348 buf = kmalloc(len, GFP_KERNEL);
12349 if (buf == NULL)
12350 return NULL;
12351
12352 if (magic == TG3_EEPROM_MAGIC) {
12353 for (i = 0; i < len; i += 4) {
12354 /* The data is in little-endian format in NVRAM.
12355 * Use the big-endian read routines to preserve
12356 * the byte order as it exists in NVRAM.
12357 */
12358 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12359 goto error;
12360 }
12361 } else {
12362 u8 *ptr;
12363 ssize_t cnt;
12364 unsigned int pos = 0;
12365
12366 ptr = (u8 *)&buf[0];
12367 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12368 cnt = pci_read_vpd(tp->pdev, pos,
12369 len - pos, ptr);
12370 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12371 cnt = 0;
12372 else if (cnt < 0)
12373 goto error;
12374 }
12375 if (pos != len)
12376 goto error;
12377 }
12378
12379 *vpdlen = len;
12380
12381 return buf;
12382
12383 error:
12384 kfree(buf);
12385 return NULL;
12386 }
12387
12388 #define NVRAM_TEST_SIZE 0x100
12389 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12390 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12391 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12392 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12393 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12394 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12395 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12396 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12397
12398 static int tg3_test_nvram(struct tg3 *tp)
12399 {
12400 u32 csum, magic, len;
12401 __be32 *buf;
12402 int i, j, k, err = 0, size;
12403
12404 if (tg3_flag(tp, NO_NVRAM))
12405 return 0;
12406
12407 if (tg3_nvram_read(tp, 0, &magic) != 0)
12408 return -EIO;
12409
12410 if (magic == TG3_EEPROM_MAGIC)
12411 size = NVRAM_TEST_SIZE;
12412 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12413 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12414 TG3_EEPROM_SB_FORMAT_1) {
12415 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12416 case TG3_EEPROM_SB_REVISION_0:
12417 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12418 break;
12419 case TG3_EEPROM_SB_REVISION_2:
12420 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12421 break;
12422 case TG3_EEPROM_SB_REVISION_3:
12423 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12424 break;
12425 case TG3_EEPROM_SB_REVISION_4:
12426 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12427 break;
12428 case TG3_EEPROM_SB_REVISION_5:
12429 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12430 break;
12431 case TG3_EEPROM_SB_REVISION_6:
12432 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12433 break;
12434 default:
12435 return -EIO;
12436 }
12437 } else
12438 return 0;
12439 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12440 size = NVRAM_SELFBOOT_HW_SIZE;
12441 else
12442 return -EIO;
12443
12444 buf = kmalloc(size, GFP_KERNEL);
12445 if (buf == NULL)
12446 return -ENOMEM;
12447
12448 err = -EIO;
12449 for (i = 0, j = 0; i < size; i += 4, j++) {
12450 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12451 if (err)
12452 break;
12453 }
12454 if (i < size)
12455 goto out;
12456
12457 /* Selfboot format */
12458 magic = be32_to_cpu(buf[0]);
12459 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12460 TG3_EEPROM_MAGIC_FW) {
12461 u8 *buf8 = (u8 *) buf, csum8 = 0;
12462
12463 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12464 TG3_EEPROM_SB_REVISION_2) {
12465 /* For rev 2, the csum doesn't include the MBA. */
12466 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12467 csum8 += buf8[i];
12468 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12469 csum8 += buf8[i];
12470 } else {
12471 for (i = 0; i < size; i++)
12472 csum8 += buf8[i];
12473 }
12474
12475 if (csum8 == 0) {
12476 err = 0;
12477 goto out;
12478 }
12479
12480 err = -EIO;
12481 goto out;
12482 }
12483
12484 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12485 TG3_EEPROM_MAGIC_HW) {
12486 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12487 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12488 u8 *buf8 = (u8 *) buf;
12489
12490 /* Separate the parity bits and the data bytes. */
12491 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12492 if ((i == 0) || (i == 8)) {
12493 int l;
12494 u8 msk;
12495
12496 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12497 parity[k++] = buf8[i] & msk;
12498 i++;
12499 } else if (i == 16) {
12500 int l;
12501 u8 msk;
12502
12503 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12504 parity[k++] = buf8[i] & msk;
12505 i++;
12506
12507 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12508 parity[k++] = buf8[i] & msk;
12509 i++;
12510 }
12511 data[j++] = buf8[i];
12512 }
12513
12514 err = -EIO;
12515 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12516 u8 hw8 = hweight8(data[i]);
12517
12518 if ((hw8 & 0x1) && parity[i])
12519 goto out;
12520 else if (!(hw8 & 0x1) && !parity[i])
12521 goto out;
12522 }
12523 err = 0;
12524 goto out;
12525 }
12526
12527 err = -EIO;
12528
12529 /* Bootstrap checksum at offset 0x10 */
12530 csum = calc_crc((unsigned char *) buf, 0x10);
12531 if (csum != le32_to_cpu(buf[0x10/4]))
12532 goto out;
12533
12534 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12535 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12536 if (csum != le32_to_cpu(buf[0xfc/4]))
12537 goto out;
12538
12539 kfree(buf);
12540
12541 buf = tg3_vpd_readblock(tp, &len);
12542 if (!buf)
12543 return -ENOMEM;
12544
12545 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12546 if (i > 0) {
12547 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12548 if (j < 0)
12549 goto out;
12550
12551 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12552 goto out;
12553
12554 i += PCI_VPD_LRDT_TAG_SIZE;
12555 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12556 PCI_VPD_RO_KEYWORD_CHKSUM);
12557 if (j > 0) {
12558 u8 csum8 = 0;
12559
12560 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12561
12562 for (i = 0; i <= j; i++)
12563 csum8 += ((u8 *)buf)[i];
12564
12565 if (csum8)
12566 goto out;
12567 }
12568 }
12569
12570 err = 0;
12571
12572 out:
12573 kfree(buf);
12574 return err;
12575 }
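/* Summary of the NVRAM self-test above, for reference: three layouts
 * are recognized. Selfboot firmware images must have an 8-bit byte
 * sum of zero (excluding the MBA word on format 1 rev 2); selfboot
 * hardware images carry odd parity over each data byte; legacy
 * images are checked with a CRC over the bootstrap header (stored at
 * 0x10) and the manufacturing block at 0x74..0xfb (stored at 0xfc),
 * followed by the VPD read-only section's RV checksum keyword.
 */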
12576
12577 #define TG3_SERDES_TIMEOUT_SEC 2
12578 #define TG3_COPPER_TIMEOUT_SEC 6
12579
12580 static int tg3_test_link(struct tg3 *tp)
12581 {
12582 int i, max;
12583
12584 if (!netif_running(tp->dev))
12585 return -ENODEV;
12586
12587 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12588 max = TG3_SERDES_TIMEOUT_SEC;
12589 else
12590 max = TG3_COPPER_TIMEOUT_SEC;
12591
12592 for (i = 0; i < max; i++) {
12593 if (tp->link_up)
12594 return 0;
12595
12596 if (msleep_interruptible(1000))
12597 break;
12598 }
12599
12600 return -EIO;
12601 }
12602
12603 /* Only test the commonly used registers */
12604 static int tg3_test_registers(struct tg3 *tp)
12605 {
12606 int i, is_5705, is_5750;
12607 u32 offset, read_mask, write_mask, val, save_val, read_val;
12608 static struct {
12609 u16 offset;
12610 u16 flags;
12611 #define TG3_FL_5705 0x1
12612 #define TG3_FL_NOT_5705 0x2
12613 #define TG3_FL_NOT_5788 0x4
12614 #define TG3_FL_NOT_5750 0x8
12615 u32 read_mask;
12616 u32 write_mask;
12617 } reg_tbl[] = {
12618 /* MAC Control Registers */
12619 { MAC_MODE, TG3_FL_NOT_5705,
12620 0x00000000, 0x00ef6f8c },
12621 { MAC_MODE, TG3_FL_5705,
12622 0x00000000, 0x01ef6b8c },
12623 { MAC_STATUS, TG3_FL_NOT_5705,
12624 0x03800107, 0x00000000 },
12625 { MAC_STATUS, TG3_FL_5705,
12626 0x03800100, 0x00000000 },
12627 { MAC_ADDR_0_HIGH, 0x0000,
12628 0x00000000, 0x0000ffff },
12629 { MAC_ADDR_0_LOW, 0x0000,
12630 0x00000000, 0xffffffff },
12631 { MAC_RX_MTU_SIZE, 0x0000,
12632 0x00000000, 0x0000ffff },
12633 { MAC_TX_MODE, 0x0000,
12634 0x00000000, 0x00000070 },
12635 { MAC_TX_LENGTHS, 0x0000,
12636 0x00000000, 0x00003fff },
12637 { MAC_RX_MODE, TG3_FL_NOT_5705,
12638 0x00000000, 0x000007fc },
12639 { MAC_RX_MODE, TG3_FL_5705,
12640 0x00000000, 0x000007dc },
12641 { MAC_HASH_REG_0, 0x0000,
12642 0x00000000, 0xffffffff },
12643 { MAC_HASH_REG_1, 0x0000,
12644 0x00000000, 0xffffffff },
12645 { MAC_HASH_REG_2, 0x0000,
12646 0x00000000, 0xffffffff },
12647 { MAC_HASH_REG_3, 0x0000,
12648 0x00000000, 0xffffffff },
12649
12650 /* Receive Data and Receive BD Initiator Control Registers. */
12651 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12652 0x00000000, 0xffffffff },
12653 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12654 0x00000000, 0xffffffff },
12655 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12656 0x00000000, 0x00000003 },
12657 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12658 0x00000000, 0xffffffff },
12659 { RCVDBDI_STD_BD+0, 0x0000,
12660 0x00000000, 0xffffffff },
12661 { RCVDBDI_STD_BD+4, 0x0000,
12662 0x00000000, 0xffffffff },
12663 { RCVDBDI_STD_BD+8, 0x0000,
12664 0x00000000, 0xffff0002 },
12665 { RCVDBDI_STD_BD+0xc, 0x0000,
12666 0x00000000, 0xffffffff },
12667
12668 /* Receive BD Initiator Control Registers. */
12669 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12670 0x00000000, 0xffffffff },
12671 { RCVBDI_STD_THRESH, TG3_FL_5705,
12672 0x00000000, 0x000003ff },
12673 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12674 0x00000000, 0xffffffff },
12675
12676 /* Host Coalescing Control Registers. */
12677 { HOSTCC_MODE, TG3_FL_NOT_5705,
12678 0x00000000, 0x00000004 },
12679 { HOSTCC_MODE, TG3_FL_5705,
12680 0x00000000, 0x000000f6 },
12681 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12682 0x00000000, 0xffffffff },
12683 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12684 0x00000000, 0x000003ff },
12685 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12686 0x00000000, 0xffffffff },
12687 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12688 0x00000000, 0x000003ff },
12689 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12690 0x00000000, 0xffffffff },
12691 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12692 0x00000000, 0x000000ff },
12693 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12694 0x00000000, 0xffffffff },
12695 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12696 0x00000000, 0x000000ff },
12697 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12698 0x00000000, 0xffffffff },
12699 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12700 0x00000000, 0xffffffff },
12701 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12702 0x00000000, 0xffffffff },
12703 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12704 0x00000000, 0x000000ff },
12705 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12706 0x00000000, 0xffffffff },
12707 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12708 0x00000000, 0x000000ff },
12709 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12710 0x00000000, 0xffffffff },
12711 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12712 0x00000000, 0xffffffff },
12713 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12714 0x00000000, 0xffffffff },
12715 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12716 0x00000000, 0xffffffff },
12717 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12718 0x00000000, 0xffffffff },
12719 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12720 0xffffffff, 0x00000000 },
12721 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12722 0xffffffff, 0x00000000 },
12723
12724 /* Buffer Manager Control Registers. */
12725 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12726 0x00000000, 0x007fff80 },
12727 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12728 0x00000000, 0x007fffff },
12729 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12730 0x00000000, 0x0000003f },
12731 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12732 0x00000000, 0x000001ff },
12733 { BUFMGR_MB_HIGH_WATER, 0x0000,
12734 0x00000000, 0x000001ff },
12735 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12736 0xffffffff, 0x00000000 },
12737 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12738 0xffffffff, 0x00000000 },
12739
12740 /* Mailbox Registers */
12741 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12742 0x00000000, 0x000001ff },
12743 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12744 0x00000000, 0x000001ff },
12745 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12746 0x00000000, 0x000007ff },
12747 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12748 0x00000000, 0x000001ff },
12749
12750 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12751 };
12752
12753 is_5705 = is_5750 = 0;
12754 if (tg3_flag(tp, 5705_PLUS)) {
12755 is_5705 = 1;
12756 if (tg3_flag(tp, 5750_PLUS))
12757 is_5750 = 1;
12758 }
12759
12760 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12761 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12762 continue;
12763
12764 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12765 continue;
12766
12767 if (tg3_flag(tp, IS_5788) &&
12768 (reg_tbl[i].flags & TG3_FL_NOT_5788))
12769 continue;
12770
12771 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12772 continue;
12773
12774 offset = (u32) reg_tbl[i].offset;
12775 read_mask = reg_tbl[i].read_mask;
12776 write_mask = reg_tbl[i].write_mask;
12777
12778 /* Save the original register content */
12779 save_val = tr32(offset);
12780
12781 /* Determine the read-only value. */
12782 read_val = save_val & read_mask;
12783
12784 /* Write zero to the register, then make sure the read-only bits
12785 * are not changed and the read/write bits are all zeros.
12786 */
12787 tw32(offset, 0);
12788
12789 val = tr32(offset);
12790
12791 /* Test the read-only and read/write bits. */
12792 if (((val & read_mask) != read_val) || (val & write_mask))
12793 goto out;
12794
12795 /* Write ones to all the bits defined by RdMask and WrMask, then
12796 * make sure the read-only bits are not changed and the
12797 * read/write bits are all ones.
12798 */
12799 tw32(offset, read_mask | write_mask);
12800
12801 val = tr32(offset);
12802
12803 /* Test the read-only bits. */
12804 if ((val & read_mask) != read_val)
12805 goto out;
12806
12807 /* Test the read/write bits. */
12808 if ((val & write_mask) != write_mask)
12809 goto out;
12810
12811 tw32(offset, save_val);
12812 }
12813
12814 return 0;
12815
12816 out:
12817 if (netif_msg_hw(tp))
12818 netdev_err(tp->dev,
12819 "Register test failed at offset %x\n", offset);
12820 tw32(offset, save_val);
12821 return -EIO;
12822 }
12823
12824 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12825 {
12826 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12827 int i;
12828 u32 j;
12829
12830 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12831 for (j = 0; j < len; j += 4) {
12832 u32 val;
12833
12834 tg3_write_mem(tp, offset + j, test_pattern[i]);
12835 tg3_read_mem(tp, offset + j, &val);
12836 if (val != test_pattern[i])
12837 return -EIO;
12838 }
12839 }
12840 return 0;
12841 }
12842
12843 static int tg3_test_memory(struct tg3 *tp)
12844 {
12845 static struct mem_entry {
12846 u32 offset;
12847 u32 len;
12848 } mem_tbl_570x[] = {
12849 { 0x00000000, 0x00b50},
12850 { 0x00002000, 0x1c000},
12851 { 0xffffffff, 0x00000}
12852 }, mem_tbl_5705[] = {
12853 { 0x00000100, 0x0000c},
12854 { 0x00000200, 0x00008},
12855 { 0x00004000, 0x00800},
12856 { 0x00006000, 0x01000},
12857 { 0x00008000, 0x02000},
12858 { 0x00010000, 0x0e000},
12859 { 0xffffffff, 0x00000}
12860 }, mem_tbl_5755[] = {
12861 { 0x00000200, 0x00008},
12862 { 0x00004000, 0x00800},
12863 { 0x00006000, 0x00800},
12864 { 0x00008000, 0x02000},
12865 { 0x00010000, 0x0c000},
12866 { 0xffffffff, 0x00000}
12867 }, mem_tbl_5906[] = {
12868 { 0x00000200, 0x00008},
12869 { 0x00004000, 0x00400},
12870 { 0x00006000, 0x00400},
12871 { 0x00008000, 0x01000},
12872 { 0x00010000, 0x01000},
12873 { 0xffffffff, 0x00000}
12874 }, mem_tbl_5717[] = {
12875 { 0x00000200, 0x00008},
12876 { 0x00010000, 0x0a000},
12877 { 0x00020000, 0x13c00},
12878 { 0xffffffff, 0x00000}
12879 }, mem_tbl_57765[] = {
12880 { 0x00000200, 0x00008},
12881 { 0x00004000, 0x00800},
12882 { 0x00006000, 0x09800},
12883 { 0x00010000, 0x0a000},
12884 { 0xffffffff, 0x00000}
12885 };
12886 struct mem_entry *mem_tbl;
12887 int err = 0;
12888 int i;
12889
12890 if (tg3_flag(tp, 5717_PLUS))
12891 mem_tbl = mem_tbl_5717;
12892 else if (tg3_flag(tp, 57765_CLASS) ||
12893 tg3_asic_rev(tp) == ASIC_REV_5762)
12894 mem_tbl = mem_tbl_57765;
12895 else if (tg3_flag(tp, 5755_PLUS))
12896 mem_tbl = mem_tbl_5755;
12897 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
12898 mem_tbl = mem_tbl_5906;
12899 else if (tg3_flag(tp, 5705_PLUS))
12900 mem_tbl = mem_tbl_5705;
12901 else
12902 mem_tbl = mem_tbl_570x;
12903
12904 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
12905 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
12906 if (err)
12907 break;
12908 }
12909
12910 return err;
12911 }
12912
12913 #define TG3_TSO_MSS 500
12914
12915 #define TG3_TSO_IP_HDR_LEN 20
12916 #define TG3_TSO_TCP_HDR_LEN 20
12917 #define TG3_TSO_TCP_OPT_LEN 12
12918
12919 static const u8 tg3_tso_header[] = {
12920 0x08, 0x00,
12921 0x45, 0x00, 0x00, 0x00,
12922 0x00, 0x00, 0x40, 0x00,
12923 0x40, 0x06, 0x00, 0x00,
12924 0x0a, 0x00, 0x00, 0x01,
12925 0x0a, 0x00, 0x00, 0x02,
12926 0x0d, 0x00, 0xe0, 0x00,
12927 0x00, 0x00, 0x01, 0x00,
12928 0x00, 0x00, 0x02, 0x00,
12929 0x80, 0x10, 0x10, 0x00,
12930 0x14, 0x09, 0x00, 0x00,
12931 0x01, 0x01, 0x08, 0x0a,
12932 0x11, 0x11, 0x11, 0x11,
12933 0x11, 0x11, 0x11, 0x11,
12934 };
12935
12936 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
12937 {
12938 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
12939 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
12940 u32 budget;
12941 struct sk_buff *skb;
12942 u8 *tx_data, *rx_data;
12943 dma_addr_t map;
12944 int num_pkts, tx_len, rx_len, i, err;
12945 struct tg3_rx_buffer_desc *desc;
12946 struct tg3_napi *tnapi, *rnapi;
12947 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
12948
12949 tnapi = &tp->napi[0];
12950 rnapi = &tp->napi[0];
12951 if (tp->irq_cnt > 1) {
12952 if (tg3_flag(tp, ENABLE_RSS))
12953 rnapi = &tp->napi[1];
12954 if (tg3_flag(tp, ENABLE_TSS))
12955 tnapi = &tp->napi[1];
12956 }
12957 coal_now = tnapi->coal_now | rnapi->coal_now;
12958
12959 err = -EIO;
12960
12961 tx_len = pktsz;
12962 skb = netdev_alloc_skb(tp->dev, tx_len);
12963 if (!skb)
12964 return -ENOMEM;
12965
12966 tx_data = skb_put(skb, tx_len);
/* Ethernet header: DA is our own MAC (the frame loops back to us);
 * the next 8 bytes (SA plus the 2-byte type field) are zeroed. For
 * the TSO case, tg3_tso_header (which begins with the 0x0800
 * ethertype) is copied in at offset 12 below.
 */
12967 memcpy(tx_data, tp->dev->dev_addr, 6);
12968 memset(tx_data + 6, 0x0, 8);
12969
12970 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
12971
12972 if (tso_loopback) {
12973 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
12974
12975 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
12976 TG3_TSO_TCP_OPT_LEN;
12977
12978 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
12979 sizeof(tg3_tso_header));
12980 mss = TG3_TSO_MSS;
12981
12982 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
12983 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
12984
12985 /* Set the total length field in the IP header */
12986 iph->tot_len = htons((u16)(mss + hdr_len));
12987
12988 base_flags = (TXD_FLAG_CPU_PRE_DMA |
12989 TXD_FLAG_CPU_POST_DMA);
12990
12991 if (tg3_flag(tp, HW_TSO_1) ||
12992 tg3_flag(tp, HW_TSO_2) ||
12993 tg3_flag(tp, HW_TSO_3)) {
12994 struct tcphdr *th;
12995 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
12996 th = (struct tcphdr *)&tx_data[val];
12997 th->check = 0;
12998 } else
12999 base_flags |= TXD_FLAG_TCPUDP_CSUM;
13000
13001 if (tg3_flag(tp, HW_TSO_3)) {
13002 mss |= (hdr_len & 0xc) << 12;
13003 if (hdr_len & 0x10)
13004 base_flags |= 0x00000010;
13005 base_flags |= (hdr_len & 0x3e0) << 5;
13006 } else if (tg3_flag(tp, HW_TSO_2))
13007 mss |= hdr_len << 9;
13008 else if (tg3_flag(tp, HW_TSO_1) ||
13009 tg3_asic_rev(tp) == ASIC_REV_5705) {
13010 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13011 } else {
13012 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13013 }
13014
13015 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13016 } else {
13017 num_pkts = 1;
13018 data_off = ETH_HLEN;
13019
13020 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13021 tx_len > VLAN_ETH_FRAME_LEN)
13022 base_flags |= TXD_FLAG_JMB_PKT;
13023 }
13024
13025 for (i = data_off; i < tx_len; i++)
13026 tx_data[i] = (u8) (i & 0xff);
13027
13028 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13029 if (pci_dma_mapping_error(tp->pdev, map)) {
13030 dev_kfree_skb(skb);
13031 return -EIO;
13032 }
13033
13034 val = tnapi->tx_prod;
13035 tnapi->tx_buffers[val].skb = skb;
13036 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13037
13038 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13039 rnapi->coal_now);
13040
13041 udelay(10);
13042
13043 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13044
13045 budget = tg3_tx_avail(tnapi);
13046 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13047 base_flags | TXD_FLAG_END, mss, 0)) {
13048 tnapi->tx_buffers[val].skb = NULL;
13049 dev_kfree_skb(skb);
13050 return -EIO;
13051 }
13052
13053 tnapi->tx_prod++;
13054
13055 /* Sync BD data before updating mailbox */
13056 wmb();
13057
13058 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13059 tr32_mailbox(tnapi->prodmbox);
13060
13061 udelay(10);
13062
13063 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
13064 for (i = 0; i < 35; i++) {
13065 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13066 coal_now);
13067
13068 udelay(10);
13069
13070 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13071 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13072 if ((tx_idx == tnapi->tx_prod) &&
13073 (rx_idx == (rx_start_idx + num_pkts)))
13074 break;
13075 }
13076
13077 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13078 dev_kfree_skb(skb);
13079
13080 if (tx_idx != tnapi->tx_prod)
13081 goto out;
13082
13083 if (rx_idx != rx_start_idx + num_pkts)
13084 goto out;
13085
13086 val = data_off;
13087 while (rx_idx != rx_start_idx) {
13088 desc = &rnapi->rx_rcb[rx_start_idx++];
13089 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13090 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13091
13092 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13093 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13094 goto out;
13095
13096 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13097 - ETH_FCS_LEN;
13098
13099 if (!tso_loopback) {
13100 if (rx_len != tx_len)
13101 goto out;
13102
13103 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13104 if (opaque_key != RXD_OPAQUE_RING_STD)
13105 goto out;
13106 } else {
13107 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13108 goto out;
13109 }
13110 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13111 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13112 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13113 goto out;
13114 }
13115
13116 if (opaque_key == RXD_OPAQUE_RING_STD) {
13117 rx_data = tpr->rx_std_buffers[desc_idx].data;
13118 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13119 mapping);
13120 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13121 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13122 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13123 mapping);
13124 } else
13125 goto out;
13126
13127 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13128 PCI_DMA_FROMDEVICE);
13129
13130 rx_data += TG3_RX_OFFSET(tp);
13131 for (i = data_off; i < rx_len; i++, val++) {
13132 if (*(rx_data + i) != (u8) (val & 0xff))
13133 goto out;
13134 }
13135 }
13136
13137 err = 0;
13138
13139 /* tg3_free_rings will unmap and free the rx_data */
13140 out:
13141 return err;
13142 }
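/* The loopback helper above is self-contained: it builds a frame
 * whose payload is the repeating byte pattern (i & 0xff), posts it
 * as a single BD (optionally TSO-split into TG3_TSO_MSS-sized
 * segments), forces a coalescing event, then polls the status block
 * for up to ~350 us for the tx consumer and rx producer indices to
 * advance. Each returned rx descriptor is validated for ring,
 * length, checksum status and payload contents before the test
 * passes.
 */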
13143
13144 #define TG3_STD_LOOPBACK_FAILED 1
13145 #define TG3_JMB_LOOPBACK_FAILED 2
13146 #define TG3_TSO_LOOPBACK_FAILED 4
13147 #define TG3_LOOPBACK_FAILED \
13148 (TG3_STD_LOOPBACK_FAILED | \
13149 TG3_JMB_LOOPBACK_FAILED | \
13150 TG3_TSO_LOOPBACK_FAILED)
13151
13152 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13153 {
13154 int err = -EIO;
13155 u32 eee_cap;
13156 u32 jmb_pkt_sz = 9000;
13157
13158 if (tp->dma_limit)
13159 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13160
13161 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13162 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13163
13164 if (!netif_running(tp->dev)) {
13165 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13166 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13167 if (do_extlpbk)
13168 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13169 goto done;
13170 }
13171
13172 err = tg3_reset_hw(tp, true);
13173 if (err) {
13174 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13175 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13176 if (do_extlpbk)
13177 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13178 goto done;
13179 }
13180
13181 if (tg3_flag(tp, ENABLE_RSS)) {
13182 int i;
13183
13184 /* Reroute all rx packets to the 1st queue */
13185 for (i = MAC_RSS_INDIR_TBL_0;
13186 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13187 tw32(i, 0x0);
13188 }
13189
13190 /* HW errata - mac loopback fails in some cases on 5780.
13191 * Normal traffic and PHY loopback are not affected by
13192 * errata. Also, the MAC loopback test is deprecated for
13193 * all newer ASIC revisions.
13194 */
13195 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13196 !tg3_flag(tp, CPMU_PRESENT)) {
13197 tg3_mac_loopback(tp, true);
13198
13199 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13200 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13201
13202 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13203 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13204 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13205
13206 tg3_mac_loopback(tp, false);
13207 }
13208
13209 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13210 !tg3_flag(tp, USE_PHYLIB)) {
13211 int i;
13212
13213 tg3_phy_lpbk_set(tp, 0, false);
13214
13215 /* Wait for link */
13216 for (i = 0; i < 100; i++) {
13217 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13218 break;
13219 mdelay(1);
13220 }
13221
13222 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13223 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13224 if (tg3_flag(tp, TSO_CAPABLE) &&
13225 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13226 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13227 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13228 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13229 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13230
13231 if (do_extlpbk) {
13232 tg3_phy_lpbk_set(tp, 0, true);
13233
13234 /* All link indications report up, but the hardware
13235 * isn't really ready for about 20 msec. Double it
13236 * to be sure.
13237 */
13238 mdelay(40);
13239
13240 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13241 data[TG3_EXT_LOOPB_TEST] |=
13242 TG3_STD_LOOPBACK_FAILED;
13243 if (tg3_flag(tp, TSO_CAPABLE) &&
13244 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13245 data[TG3_EXT_LOOPB_TEST] |=
13246 TG3_TSO_LOOPBACK_FAILED;
13247 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13248 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13249 data[TG3_EXT_LOOPB_TEST] |=
13250 TG3_JMB_LOOPBACK_FAILED;
13251 }
13252
13253 /* Re-enable gphy autopowerdown. */
13254 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13255 tg3_phy_toggle_apd(tp, true);
13256 }
13257
13258 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13259 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13260
13261 done:
13262 tp->phy_flags |= eee_cap;
13263
13264 return err;
13265 }
13266
13267 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13268 u64 *data)
13269 {
13270 struct tg3 *tp = netdev_priv(dev);
13271 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13272
13273 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
13274 tg3_power_up(tp)) {
13275 etest->flags |= ETH_TEST_FL_FAILED;
13276 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13277 return;
13278 }
13279
13280 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13281
13282 if (tg3_test_nvram(tp) != 0) {
13283 etest->flags |= ETH_TEST_FL_FAILED;
13284 data[TG3_NVRAM_TEST] = 1;
13285 }
13286 if (!doextlpbk && tg3_test_link(tp)) {
13287 etest->flags |= ETH_TEST_FL_FAILED;
13288 data[TG3_LINK_TEST] = 1;
13289 }
13290 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13291 int err, err2 = 0, irq_sync = 0;
13292
13293 if (netif_running(dev)) {
13294 tg3_phy_stop(tp);
13295 tg3_netif_stop(tp);
13296 irq_sync = 1;
13297 }
13298
13299 tg3_full_lock(tp, irq_sync);
13300 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13301 err = tg3_nvram_lock(tp);
13302 tg3_halt_cpu(tp, RX_CPU_BASE);
13303 if (!tg3_flag(tp, 5705_PLUS))
13304 tg3_halt_cpu(tp, TX_CPU_BASE);
13305 if (!err)
13306 tg3_nvram_unlock(tp);
13307
13308 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13309 tg3_phy_reset(tp);
13310
13311 if (tg3_test_registers(tp) != 0) {
13312 etest->flags |= ETH_TEST_FL_FAILED;
13313 data[TG3_REGISTER_TEST] = 1;
13314 }
13315
13316 if (tg3_test_memory(tp) != 0) {
13317 etest->flags |= ETH_TEST_FL_FAILED;
13318 data[TG3_MEMORY_TEST] = 1;
13319 }
13320
13321 if (doextlpbk)
13322 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13323
13324 if (tg3_test_loopback(tp, data, doextlpbk))
13325 etest->flags |= ETH_TEST_FL_FAILED;
13326
13327 tg3_full_unlock(tp);
13328
13329 if (tg3_test_interrupt(tp) != 0) {
13330 etest->flags |= ETH_TEST_FL_FAILED;
13331 data[TG3_INTERRUPT_TEST] = 1;
13332 }
13333
13334 tg3_full_lock(tp, 0);
13335
13336 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13337 if (netif_running(dev)) {
13338 tg3_flag_set(tp, INIT_COMPLETE);
13339 err2 = tg3_restart_hw(tp, true);
13340 if (!err2)
13341 tg3_netif_start(tp);
13342 }
13343
13344 tg3_full_unlock(tp);
13345
13346 if (irq_sync && !err2)
13347 tg3_phy_start(tp);
13348 }
13349 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13350 tg3_power_down(tp);
13352 }
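/* Example (illustrative): the self-test battery is run with
 *
 *   ethtool -t eth0 offline   # nvram, link, register, memory,
 *                             # loopback and interrupt tests
 *   ethtool -t eth0 online    # nvram and link tests only
 *
 * The offline run halts the chip, so expect a brief traffic outage
 * while the hardware is reset and restarted.
 */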
13353
13354 static int tg3_hwtstamp_ioctl(struct net_device *dev,
13355 struct ifreq *ifr, int cmd)
13356 {
13357 struct tg3 *tp = netdev_priv(dev);
13358 struct hwtstamp_config stmpconf;
13359
13360 if (!tg3_flag(tp, PTP_CAPABLE))
13361 return -EINVAL;
13362
13363 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13364 return -EFAULT;
13365
13366 if (stmpconf.flags)
13367 return -EINVAL;
13368
13369 switch (stmpconf.tx_type) {
13370 case HWTSTAMP_TX_ON:
13371 tg3_flag_set(tp, TX_TSTAMP_EN);
13372 break;
13373 case HWTSTAMP_TX_OFF:
13374 tg3_flag_clear(tp, TX_TSTAMP_EN);
13375 break;
13376 default:
13377 return -ERANGE;
13378 }
13379
13380 switch (stmpconf.rx_filter) {
13381 case HWTSTAMP_FILTER_NONE:
13382 tp->rxptpctl = 0;
13383 break;
13384 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13385 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13386 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13387 break;
13388 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13389 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13390 TG3_RX_PTP_CTL_SYNC_EVNT;
13391 break;
13392 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13393 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13394 TG3_RX_PTP_CTL_DELAY_REQ;
13395 break;
13396 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13397 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13398 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13399 break;
13400 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13401 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13402 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13403 break;
13404 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13405 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13406 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13407 break;
13408 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13409 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13410 TG3_RX_PTP_CTL_SYNC_EVNT;
13411 break;
13412 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13413 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13414 TG3_RX_PTP_CTL_SYNC_EVNT;
13415 break;
13416 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13417 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13418 TG3_RX_PTP_CTL_SYNC_EVNT;
13419 break;
13420 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13421 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13422 TG3_RX_PTP_CTL_DELAY_REQ;
13423 break;
13424 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13425 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13426 TG3_RX_PTP_CTL_DELAY_REQ;
13427 break;
13428 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13429 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13430 TG3_RX_PTP_CTL_DELAY_REQ;
13431 break;
13432 default:
13433 return -ERANGE;
13434 }
13435
13436 if (netif_running(dev) && tp->rxptpctl)
13437 tw32(TG3_RX_PTP_CTL,
13438 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13439
13440 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13441 -EFAULT : 0;
13442 }
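/* Example (illustrative): timestamping is enabled through the
 * standard SIOCSHWTSTAMP ioctl, e.g. with the linuxptp helper
 *
 *   hwstamp_ctl -i eth0 -t 1 -r 3
 *
 * which requests HWTSTAMP_TX_ON and HWTSTAMP_FILTER_PTP_V1_L4_EVENT.
 * Note that a filter the hardware cannot match exactly (including
 * HWTSTAMP_FILTER_ALL) is rejected with -ERANGE rather than widened.
 */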
13443
13444 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13445 {
13446 struct mii_ioctl_data *data = if_mii(ifr);
13447 struct tg3 *tp = netdev_priv(dev);
13448 int err;
13449
13450 if (tg3_flag(tp, USE_PHYLIB)) {
13451 struct phy_device *phydev;
13452 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13453 return -EAGAIN;
13454 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
13455 return phy_mii_ioctl(phydev, ifr, cmd);
13456 }
13457
13458 switch (cmd) {
13459 case SIOCGMIIPHY:
13460 data->phy_id = tp->phy_addr;
13461
13462 /* fallthru */
13463 case SIOCGMIIREG: {
13464 u32 mii_regval;
13465
13466 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13467 break; /* We have no PHY */
13468
13469 if (!netif_running(dev))
13470 return -EAGAIN;
13471
13472 spin_lock_bh(&tp->lock);
13473 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13474 data->reg_num & 0x1f, &mii_regval);
13475 spin_unlock_bh(&tp->lock);
13476
13477 data->val_out = mii_regval;
13478
13479 return err;
13480 }
13481
13482 case SIOCSMIIREG:
13483 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13484 break; /* We have no PHY */
13485
13486 if (!netif_running(dev))
13487 return -EAGAIN;
13488
13489 spin_lock_bh(&tp->lock);
13490 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13491 data->reg_num & 0x1f, data->val_in);
13492 spin_unlock_bh(&tp->lock);
13493
13494 return err;
13495
13496 case SIOCSHWTSTAMP:
13497 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
13498
13499 default:
13500 /* do nothing */
13501 break;
13502 }
13503 return -EOPNOTSUPP;
13504 }
13505
13506 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13507 {
13508 struct tg3 *tp = netdev_priv(dev);
13509
13510 memcpy(ec, &tp->coal, sizeof(*ec));
13511 return 0;
13512 }
13513
13514 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13515 {
13516 struct tg3 *tp = netdev_priv(dev);
13517 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13518 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13519
13520 if (!tg3_flag(tp, 5705_PLUS)) {
13521 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13522 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13523 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13524 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13525 }
13526
13527 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13528 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13529 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13530 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13531 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13532 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13533 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13534 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13535 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13536 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13537 return -EINVAL;
13538
13539 /* No rx interrupts will be generated if both are zero */
13540 if ((ec->rx_coalesce_usecs == 0) &&
13541 (ec->rx_max_coalesced_frames == 0))
13542 return -EINVAL;
13543
13544 /* No tx interrupts will be generated if both are zero */
13545 if ((ec->tx_coalesce_usecs == 0) &&
13546 (ec->tx_max_coalesced_frames == 0))
13547 return -EINVAL;
13548
13549 /* Only copy relevant parameters, ignore all others. */
13550 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13551 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13552 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13553 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13554 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13555 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13556 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13557 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13558 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13559
13560 if (netif_running(dev)) {
13561 tg3_full_lock(tp, 0);
13562 __tg3_set_coalesce(tp, &tp->coal);
13563 tg3_full_unlock(tp);
13564 }
13565 return 0;
13566 }
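/* Example (illustrative): interrupt coalescing is tuned with
 *
 *   ethtool -C eth0 rx-usecs 20 rx-frames 5
 *
 * At least one of the usecs/frames parameters must stay nonzero in
 * each direction; the checks above refuse settings that would
 * suppress rx or tx interrupts entirely.
 */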
13567
13568 static const struct ethtool_ops tg3_ethtool_ops = {
13569 .get_settings = tg3_get_settings,
13570 .set_settings = tg3_set_settings,
13571 .get_drvinfo = tg3_get_drvinfo,
13572 .get_regs_len = tg3_get_regs_len,
13573 .get_regs = tg3_get_regs,
13574 .get_wol = tg3_get_wol,
13575 .set_wol = tg3_set_wol,
13576 .get_msglevel = tg3_get_msglevel,
13577 .set_msglevel = tg3_set_msglevel,
13578 .nway_reset = tg3_nway_reset,
13579 .get_link = ethtool_op_get_link,
13580 .get_eeprom_len = tg3_get_eeprom_len,
13581 .get_eeprom = tg3_get_eeprom,
13582 .set_eeprom = tg3_set_eeprom,
13583 .get_ringparam = tg3_get_ringparam,
13584 .set_ringparam = tg3_set_ringparam,
13585 .get_pauseparam = tg3_get_pauseparam,
13586 .set_pauseparam = tg3_set_pauseparam,
13587 .self_test = tg3_self_test,
13588 .get_strings = tg3_get_strings,
13589 .set_phys_id = tg3_set_phys_id,
13590 .get_ethtool_stats = tg3_get_ethtool_stats,
13591 .get_coalesce = tg3_get_coalesce,
13592 .set_coalesce = tg3_set_coalesce,
13593 .get_sset_count = tg3_get_sset_count,
13594 .get_rxnfc = tg3_get_rxnfc,
13595 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
13596 .get_rxfh_indir = tg3_get_rxfh_indir,
13597 .set_rxfh_indir = tg3_set_rxfh_indir,
13598 .get_channels = tg3_get_channels,
13599 .set_channels = tg3_set_channels,
13600 .get_ts_info = tg3_get_ts_info,
13601 };
13602
13603 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13604 struct rtnl_link_stats64 *stats)
13605 {
13606 struct tg3 *tp = netdev_priv(dev);
13607
13608 spin_lock_bh(&tp->lock);
13609 if (!tp->hw_stats) {
13610 spin_unlock_bh(&tp->lock);
13611 return &tp->net_stats_prev;
13612 }
13613
13614 tg3_get_nstats(tp, stats);
13615 spin_unlock_bh(&tp->lock);
13616
13617 return stats;
13618 }
13619
13620 static void tg3_set_rx_mode(struct net_device *dev)
13621 {
13622 struct tg3 *tp = netdev_priv(dev);
13623
13624 if (!netif_running(dev))
13625 return;
13626
13627 tg3_full_lock(tp, 0);
13628 __tg3_set_rx_mode(dev);
13629 tg3_full_unlock(tp);
13630 }
13631
13632 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13633 int new_mtu)
13634 {
13635 dev->mtu = new_mtu;
13636
13637 if (new_mtu > ETH_DATA_LEN) {
13638 if (tg3_flag(tp, 5780_CLASS)) {
13639 netdev_update_features(dev);
13640 tg3_flag_clear(tp, TSO_CAPABLE);
13641 } else {
13642 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13643 }
13644 } else {
13645 if (tg3_flag(tp, 5780_CLASS)) {
13646 tg3_flag_set(tp, TSO_CAPABLE);
13647 netdev_update_features(dev);
13648 }
13649 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13650 }
13651 }
13652
13653 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13654 {
13655 struct tg3 *tp = netdev_priv(dev);
13656 int err;
13657 bool reset_phy = false;
13658
13659 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13660 return -EINVAL;
13661
13662 if (!netif_running(dev)) {
13663 /* We'll just catch it later when the
13664 * device is brought up.
13665 */
13666 tg3_set_mtu(dev, tp, new_mtu);
13667 return 0;
13668 }
13669
13670 tg3_phy_stop(tp);
13671
13672 tg3_netif_stop(tp);
13673
13674 tg3_full_lock(tp, 1);
13675
13676 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13677
13678 tg3_set_mtu(dev, tp, new_mtu);
13679
13680 /* Reset PHY, otherwise the read DMA engine will be in a mode that
13681 * breaks all requests to 256 bytes.
13682 */
13683 if (tg3_asic_rev(tp) == ASIC_REV_57766)
13684 reset_phy = true;
13685
13686 err = tg3_restart_hw(tp, reset_phy);
13687
13688 if (!err)
13689 tg3_netif_start(tp);
13690
13691 tg3_full_unlock(tp);
13692
13693 if (!err)
13694 tg3_phy_start(tp);
13695
13696 return err;
13697 }
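/* Example (illustrative): a jumbo MTU change such as
 *
 *   ip link set dev eth0 mtu 9000
 *
 * lands here; tg3_set_mtu() enables JUMBO_RING_ENABLE (or, on
 * 5780-class parts, trades away TSO_CAPABLE instead), and a running
 * device is halted and restarted so the new ring layout takes
 * effect.
 */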
13698
13699 static const struct net_device_ops tg3_netdev_ops = {
13700 .ndo_open = tg3_open,
13701 .ndo_stop = tg3_close,
13702 .ndo_start_xmit = tg3_start_xmit,
13703 .ndo_get_stats64 = tg3_get_stats64,
13704 .ndo_validate_addr = eth_validate_addr,
13705 .ndo_set_rx_mode = tg3_set_rx_mode,
13706 .ndo_set_mac_address = tg3_set_mac_addr,
13707 .ndo_do_ioctl = tg3_ioctl,
13708 .ndo_tx_timeout = tg3_tx_timeout,
13709 .ndo_change_mtu = tg3_change_mtu,
13710 .ndo_fix_features = tg3_fix_features,
13711 .ndo_set_features = tg3_set_features,
13712 #ifdef CONFIG_NET_POLL_CONTROLLER
13713 .ndo_poll_controller = tg3_poll_controller,
13714 #endif
13715 };
13716
13717 static void tg3_get_eeprom_size(struct tg3 *tp)
13718 {
13719 u32 cursize, val, magic;
13720
13721 tp->nvram_size = EEPROM_CHIP_SIZE;
13722
13723 if (tg3_nvram_read(tp, 0, &magic) != 0)
13724 return;
13725
13726 if ((magic != TG3_EEPROM_MAGIC) &&
13727 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13728 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13729 return;
13730
13731 /*
13732 * Size the chip by reading offsets at increasing powers of two.
13733 * When we encounter our validation signature, we know the addressing
13734 * has wrapped around, and thus have our chip size.
13735 */
13736 cursize = 0x10;
13737
13738 while (cursize < tp->nvram_size) {
13739 if (tg3_nvram_read(tp, cursize, &val) != 0)
13740 return;
13741
13742 if (val == magic)
13743 break;
13744
13745 cursize <<= 1;
13746 }
13747
13748 tp->nvram_size = cursize;
13749 }
13750
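/* Determine the NVRAM size. Standard images carry a size word at
* offset 0xf0; selfboot formats are sized by probing instead, and
* anything else falls back to the 512KB default.
*/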
13751 static void tg3_get_nvram_size(struct tg3 *tp)
13752 {
13753 u32 val;
13754
13755 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13756 return;
13757
13758 /* Selfboot format */
13759 if (val != TG3_EEPROM_MAGIC) {
13760 tg3_get_eeprom_size(tp);
13761 return;
13762 }
13763
13764 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13765 if (val != 0) {
13766 /* This is confusing. We want to operate on the
13767 * 16-bit value at offset 0xf2. The tg3_nvram_read()
13768 * call will read from NVRAM and byteswap the data
13769 * according to the byteswapping settings for all
13770 * other register accesses. This ensures the data we
13771 * want will always reside in the lower 16 bits.
13772 * However, the data in NVRAM is in LE format, which
13773 * means the data from the NVRAM read will always be
13774 * opposite the endianness of the CPU. The 16-bit
13775 * byteswap then brings the data to CPU endianness.
13776 */
13777 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13778 return;
13779 }
13780 }
13781 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13782 }
13783
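/* Decode NVRAM_CFG1 on pre-5752 devices to identify the attached
* flash or EEPROM vendor and its page size.
*/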
13784 static void tg3_get_nvram_info(struct tg3 *tp)
13785 {
13786 u32 nvcfg1;
13787
13788 nvcfg1 = tr32(NVRAM_CFG1);
13789 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
13790 tg3_flag_set(tp, FLASH);
13791 } else {
13792 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13793 tw32(NVRAM_CFG1, nvcfg1);
13794 }
13795
13796 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
13797 tg3_flag(tp, 5780_CLASS)) {
13798 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
13799 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
13800 tp->nvram_jedecnum = JEDEC_ATMEL;
13801 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13802 tg3_flag_set(tp, NVRAM_BUFFERED);
13803 break;
13804 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
13805 tp->nvram_jedecnum = JEDEC_ATMEL;
13806 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
13807 break;
13808 case FLASH_VENDOR_ATMEL_EEPROM:
13809 tp->nvram_jedecnum = JEDEC_ATMEL;
13810 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13811 tg3_flag_set(tp, NVRAM_BUFFERED);
13812 break;
13813 case FLASH_VENDOR_ST:
13814 tp->nvram_jedecnum = JEDEC_ST;
13815 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
13816 tg3_flag_set(tp, NVRAM_BUFFERED);
13817 break;
13818 case FLASH_VENDOR_SAIFUN:
13819 tp->nvram_jedecnum = JEDEC_SAIFUN;
13820 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
13821 break;
13822 case FLASH_VENDOR_SST_SMALL:
13823 case FLASH_VENDOR_SST_LARGE:
13824 tp->nvram_jedecnum = JEDEC_SST;
13825 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
13826 break;
13827 }
13828 } else {
13829 tp->nvram_jedecnum = JEDEC_ATMEL;
13830 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13831 tg3_flag_set(tp, NVRAM_BUFFERED);
13832 }
13833 }
13834
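/* Translate the 5752-style page-size field of NVRAM_CFG1 into bytes. */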
13835 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13836 {
13837 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13838 case FLASH_5752PAGE_SIZE_256:
13839 tp->nvram_pagesize = 256;
13840 break;
13841 case FLASH_5752PAGE_SIZE_512:
13842 tp->nvram_pagesize = 512;
13843 break;
13844 case FLASH_5752PAGE_SIZE_1K:
13845 tp->nvram_pagesize = 1024;
13846 break;
13847 case FLASH_5752PAGE_SIZE_2K:
13848 tp->nvram_pagesize = 2048;
13849 break;
13850 case FLASH_5752PAGE_SIZE_4K:
13851 tp->nvram_pagesize = 4096;
13852 break;
13853 case FLASH_5752PAGE_SIZE_264:
13854 tp->nvram_pagesize = 264;
13855 break;
13856 case FLASH_5752PAGE_SIZE_528:
13857 tp->nvram_pagesize = 528;
13858 break;
13859 }
13860 }
13861
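/* The tg3_get_5xxx_nvram_info() routines below repeat this CFG1
* decode for later chip families, each of which straps a different
* set of flash parts.
*/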
13862 static void tg3_get_5752_nvram_info(struct tg3 *tp)
13863 {
13864 u32 nvcfg1;
13865
13866 nvcfg1 = tr32(NVRAM_CFG1);
13867
13868 /* NVRAM protection for TPM */
13869 if (nvcfg1 & (1 << 27))
13870 tg3_flag_set(tp, PROTECTED_NVRAM);
13871
13872 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13873 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
13874 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
13875 tp->nvram_jedecnum = JEDEC_ATMEL;
13876 tg3_flag_set(tp, NVRAM_BUFFERED);
13877 break;
13878 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13879 tp->nvram_jedecnum = JEDEC_ATMEL;
13880 tg3_flag_set(tp, NVRAM_BUFFERED);
13881 tg3_flag_set(tp, FLASH);
13882 break;
13883 case FLASH_5752VENDOR_ST_M45PE10:
13884 case FLASH_5752VENDOR_ST_M45PE20:
13885 case FLASH_5752VENDOR_ST_M45PE40:
13886 tp->nvram_jedecnum = JEDEC_ST;
13887 tg3_flag_set(tp, NVRAM_BUFFERED);
13888 tg3_flag_set(tp, FLASH);
13889 break;
13890 }
13891
13892 if (tg3_flag(tp, FLASH)) {
13893 tg3_nvram_get_pagesize(tp, nvcfg1);
13894 } else {
13895 /* For eeprom, set pagesize to maximum eeprom size */
13896 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13897
13898 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13899 tw32(NVRAM_CFG1, nvcfg1);
13900 }
13901 }
13902
13903 static void tg3_get_5755_nvram_info(struct tg3 *tp)
13904 {
13905 u32 nvcfg1, protect = 0;
13906
13907 nvcfg1 = tr32(NVRAM_CFG1);
13908
13909 /* NVRAM protection for TPM */
13910 if (nvcfg1 & (1 << 27)) {
13911 tg3_flag_set(tp, PROTECTED_NVRAM);
13912 protect = 1;
13913 }
13914
13915 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13916 switch (nvcfg1) {
13917 case FLASH_5755VENDOR_ATMEL_FLASH_1:
13918 case FLASH_5755VENDOR_ATMEL_FLASH_2:
13919 case FLASH_5755VENDOR_ATMEL_FLASH_3:
13920 case FLASH_5755VENDOR_ATMEL_FLASH_5:
13921 tp->nvram_jedecnum = JEDEC_ATMEL;
13922 tg3_flag_set(tp, NVRAM_BUFFERED);
13923 tg3_flag_set(tp, FLASH);
13924 tp->nvram_pagesize = 264;
13925 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
13926 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
13927 tp->nvram_size = (protect ? 0x3e200 :
13928 TG3_NVRAM_SIZE_512KB);
13929 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
13930 tp->nvram_size = (protect ? 0x1f200 :
13931 TG3_NVRAM_SIZE_256KB);
13932 else
13933 tp->nvram_size = (protect ? 0x1f200 :
13934 TG3_NVRAM_SIZE_128KB);
13935 break;
13936 case FLASH_5752VENDOR_ST_M45PE10:
13937 case FLASH_5752VENDOR_ST_M45PE20:
13938 case FLASH_5752VENDOR_ST_M45PE40:
13939 tp->nvram_jedecnum = JEDEC_ST;
13940 tg3_flag_set(tp, NVRAM_BUFFERED);
13941 tg3_flag_set(tp, FLASH);
13942 tp->nvram_pagesize = 256;
13943 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
13944 tp->nvram_size = (protect ?
13945 TG3_NVRAM_SIZE_64KB :
13946 TG3_NVRAM_SIZE_128KB);
13947 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
13948 tp->nvram_size = (protect ?
13949 TG3_NVRAM_SIZE_64KB :
13950 TG3_NVRAM_SIZE_256KB);
13951 else
13952 tp->nvram_size = (protect ?
13953 TG3_NVRAM_SIZE_128KB :
13954 TG3_NVRAM_SIZE_512KB);
13955 break;
13956 }
13957 }
13958
13959 static void tg3_get_5787_nvram_info(struct tg3 *tp)
13960 {
13961 u32 nvcfg1;
13962
13963 nvcfg1 = tr32(NVRAM_CFG1);
13964
13965 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13966 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
13967 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13968 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
13969 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13970 tp->nvram_jedecnum = JEDEC_ATMEL;
13971 tg3_flag_set(tp, NVRAM_BUFFERED);
13972 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13973
13974 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13975 tw32(NVRAM_CFG1, nvcfg1);
13976 break;
13977 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13978 case FLASH_5755VENDOR_ATMEL_FLASH_1:
13979 case FLASH_5755VENDOR_ATMEL_FLASH_2:
13980 case FLASH_5755VENDOR_ATMEL_FLASH_3:
13981 tp->nvram_jedecnum = JEDEC_ATMEL;
13982 tg3_flag_set(tp, NVRAM_BUFFERED);
13983 tg3_flag_set(tp, FLASH);
13984 tp->nvram_pagesize = 264;
13985 break;
13986 case FLASH_5752VENDOR_ST_M45PE10:
13987 case FLASH_5752VENDOR_ST_M45PE20:
13988 case FLASH_5752VENDOR_ST_M45PE40:
13989 tp->nvram_jedecnum = JEDEC_ST;
13990 tg3_flag_set(tp, NVRAM_BUFFERED);
13991 tg3_flag_set(tp, FLASH);
13992 tp->nvram_pagesize = 256;
13993 break;
13994 }
13995 }
13996
13997 static void tg3_get_5761_nvram_info(struct tg3 *tp)
13998 {
13999 u32 nvcfg1, protect = 0;
14000
14001 nvcfg1 = tr32(NVRAM_CFG1);
14002
14003 /* NVRAM protection for TPM */
14004 if (nvcfg1 & (1 << 27)) {
14005 tg3_flag_set(tp, PROTECTED_NVRAM);
14006 protect = 1;
14007 }
14008
14009 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14010 switch (nvcfg1) {
14011 case FLASH_5761VENDOR_ATMEL_ADB021D:
14012 case FLASH_5761VENDOR_ATMEL_ADB041D:
14013 case FLASH_5761VENDOR_ATMEL_ADB081D:
14014 case FLASH_5761VENDOR_ATMEL_ADB161D:
14015 case FLASH_5761VENDOR_ATMEL_MDB021D:
14016 case FLASH_5761VENDOR_ATMEL_MDB041D:
14017 case FLASH_5761VENDOR_ATMEL_MDB081D:
14018 case FLASH_5761VENDOR_ATMEL_MDB161D:
14019 tp->nvram_jedecnum = JEDEC_ATMEL;
14020 tg3_flag_set(tp, NVRAM_BUFFERED);
14021 tg3_flag_set(tp, FLASH);
14022 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14023 tp->nvram_pagesize = 256;
14024 break;
14025 case FLASH_5761VENDOR_ST_A_M45PE20:
14026 case FLASH_5761VENDOR_ST_A_M45PE40:
14027 case FLASH_5761VENDOR_ST_A_M45PE80:
14028 case FLASH_5761VENDOR_ST_A_M45PE16:
14029 case FLASH_5761VENDOR_ST_M_M45PE20:
14030 case FLASH_5761VENDOR_ST_M_M45PE40:
14031 case FLASH_5761VENDOR_ST_M_M45PE80:
14032 case FLASH_5761VENDOR_ST_M_M45PE16:
14033 tp->nvram_jedecnum = JEDEC_ST;
14034 tg3_flag_set(tp, NVRAM_BUFFERED);
14035 tg3_flag_set(tp, FLASH);
14036 tp->nvram_pagesize = 256;
14037 break;
14038 }
14039
14040 if (protect) {
14041 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14042 } else {
14043 switch (nvcfg1) {
14044 case FLASH_5761VENDOR_ATMEL_ADB161D:
14045 case FLASH_5761VENDOR_ATMEL_MDB161D:
14046 case FLASH_5761VENDOR_ST_A_M45PE16:
14047 case FLASH_5761VENDOR_ST_M_M45PE16:
14048 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14049 break;
14050 case FLASH_5761VENDOR_ATMEL_ADB081D:
14051 case FLASH_5761VENDOR_ATMEL_MDB081D:
14052 case FLASH_5761VENDOR_ST_A_M45PE80:
14053 case FLASH_5761VENDOR_ST_M_M45PE80:
14054 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14055 break;
14056 case FLASH_5761VENDOR_ATMEL_ADB041D:
14057 case FLASH_5761VENDOR_ATMEL_MDB041D:
14058 case FLASH_5761VENDOR_ST_A_M45PE40:
14059 case FLASH_5761VENDOR_ST_M_M45PE40:
14060 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14061 break;
14062 case FLASH_5761VENDOR_ATMEL_ADB021D:
14063 case FLASH_5761VENDOR_ATMEL_MDB021D:
14064 case FLASH_5761VENDOR_ST_A_M45PE20:
14065 case FLASH_5761VENDOR_ST_M_M45PE20:
14066 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14067 break;
14068 }
14069 }
14070 }
14071
14072 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14073 {
14074 tp->nvram_jedecnum = JEDEC_ATMEL;
14075 tg3_flag_set(tp, NVRAM_BUFFERED);
14076 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14077 }
14078
14079 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14080 {
14081 u32 nvcfg1;
14082
14083 nvcfg1 = tr32(NVRAM_CFG1);
14084
14085 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14086 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14087 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14088 tp->nvram_jedecnum = JEDEC_ATMEL;
14089 tg3_flag_set(tp, NVRAM_BUFFERED);
14090 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14091
14092 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14093 tw32(NVRAM_CFG1, nvcfg1);
14094 return;
14095 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14096 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14097 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14098 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14099 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14100 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14101 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14102 tp->nvram_jedecnum = JEDEC_ATMEL;
14103 tg3_flag_set(tp, NVRAM_BUFFERED);
14104 tg3_flag_set(tp, FLASH);
14105
14106 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14107 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14108 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14109 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14110 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14111 break;
14112 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14113 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14114 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14115 break;
14116 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14117 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14118 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14119 break;
14120 }
14121 break;
14122 case FLASH_5752VENDOR_ST_M45PE10:
14123 case FLASH_5752VENDOR_ST_M45PE20:
14124 case FLASH_5752VENDOR_ST_M45PE40:
14125 tp->nvram_jedecnum = JEDEC_ST;
14126 tg3_flag_set(tp, NVRAM_BUFFERED);
14127 tg3_flag_set(tp, FLASH);
14128
14129 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14130 case FLASH_5752VENDOR_ST_M45PE10:
14131 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14132 break;
14133 case FLASH_5752VENDOR_ST_M45PE20:
14134 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14135 break;
14136 case FLASH_5752VENDOR_ST_M45PE40:
14137 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14138 break;
14139 }
14140 break;
14141 default:
14142 tg3_flag_set(tp, NO_NVRAM);
14143 return;
14144 }
14145
14146 tg3_nvram_get_pagesize(tp, nvcfg1);
14147 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14148 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14149 }
14150
14151
14152 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14153 {
14154 u32 nvcfg1;
14155
14156 nvcfg1 = tr32(NVRAM_CFG1);
14157
14158 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14159 case FLASH_5717VENDOR_ATMEL_EEPROM:
14160 case FLASH_5717VENDOR_MICRO_EEPROM:
14161 tp->nvram_jedecnum = JEDEC_ATMEL;
14162 tg3_flag_set(tp, NVRAM_BUFFERED);
14163 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14164
14165 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14166 tw32(NVRAM_CFG1, nvcfg1);
14167 return;
14168 case FLASH_5717VENDOR_ATMEL_MDB011D:
14169 case FLASH_5717VENDOR_ATMEL_ADB011B:
14170 case FLASH_5717VENDOR_ATMEL_ADB011D:
14171 case FLASH_5717VENDOR_ATMEL_MDB021D:
14172 case FLASH_5717VENDOR_ATMEL_ADB021B:
14173 case FLASH_5717VENDOR_ATMEL_ADB021D:
14174 case FLASH_5717VENDOR_ATMEL_45USPT:
14175 tp->nvram_jedecnum = JEDEC_ATMEL;
14176 tg3_flag_set(tp, NVRAM_BUFFERED);
14177 tg3_flag_set(tp, FLASH);
14178
14179 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14180 case FLASH_5717VENDOR_ATMEL_MDB021D:
14181 /* Detect size with tg3_nvram_get_size() */
14182 break;
14183 case FLASH_5717VENDOR_ATMEL_ADB021B:
14184 case FLASH_5717VENDOR_ATMEL_ADB021D:
14185 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14186 break;
14187 default:
14188 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14189 break;
14190 }
14191 break;
14192 case FLASH_5717VENDOR_ST_M_M25PE10:
14193 case FLASH_5717VENDOR_ST_A_M25PE10:
14194 case FLASH_5717VENDOR_ST_M_M45PE10:
14195 case FLASH_5717VENDOR_ST_A_M45PE10:
14196 case FLASH_5717VENDOR_ST_M_M25PE20:
14197 case FLASH_5717VENDOR_ST_A_M25PE20:
14198 case FLASH_5717VENDOR_ST_M_M45PE20:
14199 case FLASH_5717VENDOR_ST_A_M45PE20:
14200 case FLASH_5717VENDOR_ST_25USPT:
14201 case FLASH_5717VENDOR_ST_45USPT:
14202 tp->nvram_jedecnum = JEDEC_ST;
14203 tg3_flag_set(tp, NVRAM_BUFFERED);
14204 tg3_flag_set(tp, FLASH);
14205
14206 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14207 case FLASH_5717VENDOR_ST_M_M25PE20:
14208 case FLASH_5717VENDOR_ST_M_M45PE20:
14209 /* Detect size with tg3_nvram_get_size() */
14210 break;
14211 case FLASH_5717VENDOR_ST_A_M25PE20:
14212 case FLASH_5717VENDOR_ST_A_M45PE20:
14213 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14214 break;
14215 default:
14216 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14217 break;
14218 }
14219 break;
14220 default:
14221 tg3_flag_set(tp, NO_NVRAM);
14222 return;
14223 }
14224
14225 tg3_nvram_get_pagesize(tp, nvcfg1);
14226 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14227 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14228 }
14229
14230 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14231 {
14232 u32 nvcfg1, nvmpinstrp;
14233
14234 nvcfg1 = tr32(NVRAM_CFG1);
14235 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14236
14237 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14238 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14239 tg3_flag_set(tp, NO_NVRAM);
14240 return;
14241 }
14242
14243 switch (nvmpinstrp) {
14244 case FLASH_5762_EEPROM_HD:
14245 nvmpinstrp = FLASH_5720_EEPROM_HD;
14246 break;
14247 case FLASH_5762_EEPROM_LD:
14248 nvmpinstrp = FLASH_5720_EEPROM_LD;
14249 break;
14250 case FLASH_5720VENDOR_M_ST_M45PE20:
14251 /* This pinstrap supports multiple sizes, so force it
14252 * to read the actual size from location 0xf0.
14253 */
14254 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14255 break;
14256 }
14257 }
14258
14259 switch (nvmpinstrp) {
14260 case FLASH_5720_EEPROM_HD:
14261 case FLASH_5720_EEPROM_LD:
14262 tp->nvram_jedecnum = JEDEC_ATMEL;
14263 tg3_flag_set(tp, NVRAM_BUFFERED);
14264
14265 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14266 tw32(NVRAM_CFG1, nvcfg1);
14267 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14268 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14269 else
14270 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14271 return;
14272 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14273 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14274 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14275 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14276 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14277 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14278 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14279 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14280 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14281 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14282 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14283 case FLASH_5720VENDOR_ATMEL_45USPT:
14284 tp->nvram_jedecnum = JEDEC_ATMEL;
14285 tg3_flag_set(tp, NVRAM_BUFFERED);
14286 tg3_flag_set(tp, FLASH);
14287
14288 switch (nvmpinstrp) {
14289 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14290 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14291 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14292 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14293 break;
14294 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14295 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14296 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14297 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14298 break;
14299 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14300 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14301 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14302 break;
14303 default:
14304 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14305 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14306 break;
14307 }
14308 break;
14309 case FLASH_5720VENDOR_M_ST_M25PE10:
14310 case FLASH_5720VENDOR_M_ST_M45PE10:
14311 case FLASH_5720VENDOR_A_ST_M25PE10:
14312 case FLASH_5720VENDOR_A_ST_M45PE10:
14313 case FLASH_5720VENDOR_M_ST_M25PE20:
14314 case FLASH_5720VENDOR_M_ST_M45PE20:
14315 case FLASH_5720VENDOR_A_ST_M25PE20:
14316 case FLASH_5720VENDOR_A_ST_M45PE20:
14317 case FLASH_5720VENDOR_M_ST_M25PE40:
14318 case FLASH_5720VENDOR_M_ST_M45PE40:
14319 case FLASH_5720VENDOR_A_ST_M25PE40:
14320 case FLASH_5720VENDOR_A_ST_M45PE40:
14321 case FLASH_5720VENDOR_M_ST_M25PE80:
14322 case FLASH_5720VENDOR_M_ST_M45PE80:
14323 case FLASH_5720VENDOR_A_ST_M25PE80:
14324 case FLASH_5720VENDOR_A_ST_M45PE80:
14325 case FLASH_5720VENDOR_ST_25USPT:
14326 case FLASH_5720VENDOR_ST_45USPT:
14327 tp->nvram_jedecnum = JEDEC_ST;
14328 tg3_flag_set(tp, NVRAM_BUFFERED);
14329 tg3_flag_set(tp, FLASH);
14330
14331 switch (nvmpinstrp) {
14332 case FLASH_5720VENDOR_M_ST_M25PE20:
14333 case FLASH_5720VENDOR_M_ST_M45PE20:
14334 case FLASH_5720VENDOR_A_ST_M25PE20:
14335 case FLASH_5720VENDOR_A_ST_M45PE20:
14336 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14337 break;
14338 case FLASH_5720VENDOR_M_ST_M25PE40:
14339 case FLASH_5720VENDOR_M_ST_M45PE40:
14340 case FLASH_5720VENDOR_A_ST_M25PE40:
14341 case FLASH_5720VENDOR_A_ST_M45PE40:
14342 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14343 break;
14344 case FLASH_5720VENDOR_M_ST_M25PE80:
14345 case FLASH_5720VENDOR_M_ST_M45PE80:
14346 case FLASH_5720VENDOR_A_ST_M25PE80:
14347 case FLASH_5720VENDOR_A_ST_M45PE80:
14348 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14349 break;
14350 default:
14351 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14352 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14353 break;
14354 }
14355 break;
14356 default:
14357 tg3_flag_set(tp, NO_NVRAM);
14358 return;
14359 }
14360
14361 tg3_nvram_get_pagesize(tp, nvcfg1);
14362 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14363 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14364
14365 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14366 u32 val;
14367
14368 if (tg3_nvram_read(tp, 0, &val))
14369 return;
14370
14371 if (val != TG3_EEPROM_MAGIC &&
14372 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14373 tg3_flag_set(tp, NO_NVRAM);
14374 }
14375 }
14376
14377 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14378 static void tg3_nvram_init(struct tg3 *tp)
14379 {
14380 if (tg3_flag(tp, IS_SSB_CORE)) {
14381 /* There is no NVRAM or EEPROM on the SSB Broadcom GigE core. */
14382 tg3_flag_clear(tp, NVRAM);
14383 tg3_flag_clear(tp, NVRAM_BUFFERED);
14384 tg3_flag_set(tp, NO_NVRAM);
14385 return;
14386 }
14387
14388 tw32_f(GRC_EEPROM_ADDR,
14389 (EEPROM_ADDR_FSM_RESET |
14390 (EEPROM_DEFAULT_CLOCK_PERIOD <<
14391 EEPROM_ADDR_CLKPERD_SHIFT)));
14392
14393 msleep(1);
14394
14395 /* Enable seeprom accesses. */
14396 tw32_f(GRC_LOCAL_CTRL,
14397 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14398 udelay(100);
14399
14400 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14401 tg3_asic_rev(tp) != ASIC_REV_5701) {
14402 tg3_flag_set(tp, NVRAM);
14403
14404 if (tg3_nvram_lock(tp)) {
14405 netdev_warn(tp->dev,
14406 "Cannot get nvram lock, %s failed\n",
14407 __func__);
14408 return;
14409 }
14410 tg3_enable_nvram_access(tp);
14411
14412 tp->nvram_size = 0;
14413
14414 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14415 tg3_get_5752_nvram_info(tp);
14416 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14417 tg3_get_5755_nvram_info(tp);
14418 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14419 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14420 tg3_asic_rev(tp) == ASIC_REV_5785)
14421 tg3_get_5787_nvram_info(tp);
14422 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14423 tg3_get_5761_nvram_info(tp);
14424 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14425 tg3_get_5906_nvram_info(tp);
14426 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14427 tg3_flag(tp, 57765_CLASS))
14428 tg3_get_57780_nvram_info(tp);
14429 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14430 tg3_asic_rev(tp) == ASIC_REV_5719)
14431 tg3_get_5717_nvram_info(tp);
14432 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14433 tg3_asic_rev(tp) == ASIC_REV_5762)
14434 tg3_get_5720_nvram_info(tp);
14435 else
14436 tg3_get_nvram_info(tp);
14437
14438 if (tp->nvram_size == 0)
14439 tg3_get_nvram_size(tp);
14440
14441 tg3_disable_nvram_access(tp);
14442 tg3_nvram_unlock(tp);
14443
14444 } else {
14445 tg3_flag_clear(tp, NVRAM);
14446 tg3_flag_clear(tp, NVRAM_BUFFERED);
14447
14448 tg3_get_eeprom_size(tp);
14449 }
14450 }
14451
14452 struct subsys_tbl_ent {
14453 u16 subsys_vendor, subsys_devid;
14454 u32 phy_id;
14455 };
14456
14457 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14458 /* Broadcom boards. */
14459 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14460 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14461 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14462 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14463 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14464 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14465 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14466 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14467 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14468 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14469 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14470 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14471 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14472 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14473 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14474 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14475 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14476 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14477 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14478 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14479 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14480 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
14481
14482 /* 3com boards. */
14483 { TG3PCI_SUBVENDOR_ID_3COM,
14484 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
14485 { TG3PCI_SUBVENDOR_ID_3COM,
14486 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
14487 { TG3PCI_SUBVENDOR_ID_3COM,
14488 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
14489 { TG3PCI_SUBVENDOR_ID_3COM,
14490 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
14491 { TG3PCI_SUBVENDOR_ID_3COM,
14492 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
14493
14494 /* DELL boards. */
14495 { TG3PCI_SUBVENDOR_ID_DELL,
14496 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
14497 { TG3PCI_SUBVENDOR_ID_DELL,
14498 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
14499 { TG3PCI_SUBVENDOR_ID_DELL,
14500 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
14501 { TG3PCI_SUBVENDOR_ID_DELL,
14502 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
14503
14504 /* Compaq boards. */
14505 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14506 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
14507 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14508 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
14509 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14510 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
14511 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14512 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
14513 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14514 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
14515
14516 /* IBM boards. */
14517 { TG3PCI_SUBVENDOR_ID_IBM,
14518 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
14519 };
14520
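/* Match this board's PCI subsystem IDs against the table above to
* recover a PHY ID when the eeprom does not provide one.
*/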
14521 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14522 {
14523 int i;
14524
14525 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14526 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14527 tp->pdev->subsystem_vendor) &&
14528 (subsys_id_to_phy_id[i].subsys_devid ==
14529 tp->pdev->subsystem_device))
14530 return &subsys_id_to_phy_id[i];
14531 }
14532 return NULL;
14533 }
14534
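/* Read the hardware configuration left in NIC SRAM by the bootcode:
* PHY ID, LED mode, and WOL/ASF/APE capabilities.
*/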
14535 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14536 {
14537 u32 val;
14538
14539 tp->phy_id = TG3_PHY_ID_INVALID;
14540 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14541
14542 /* Assume an onboard device and WOL capable by default. */
14543 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14544 tg3_flag_set(tp, WOL_CAP);
14545
14546 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14547 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14548 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14549 tg3_flag_set(tp, IS_NIC);
14550 }
14551 val = tr32(VCPU_CFGSHDW);
14552 if (val & VCPU_CFGSHDW_ASPM_DBNC)
14553 tg3_flag_set(tp, ASPM_WORKAROUND);
14554 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14555 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14556 tg3_flag_set(tp, WOL_ENABLE);
14557 device_set_wakeup_enable(&tp->pdev->dev, true);
14558 }
14559 goto done;
14560 }
14561
14562 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
14563 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14564 u32 nic_cfg, led_cfg;
14565 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
14566 int eeprom_phy_serdes = 0;
14567
14568 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14569 tp->nic_sram_data_cfg = nic_cfg;
14570
14571 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14572 ver >>= NIC_SRAM_DATA_VER_SHIFT;
14573 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14574 tg3_asic_rev(tp) != ASIC_REV_5701 &&
14575 tg3_asic_rev(tp) != ASIC_REV_5703 &&
14576 (ver > 0) && (ver < 0x100))
14577 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14578
14579 if (tg3_asic_rev(tp) == ASIC_REV_5785)
14580 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14581
14582 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14583 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14584 eeprom_phy_serdes = 1;
14585
14586 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14587 if (nic_phy_id != 0) {
14588 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14589 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14590
14591 eeprom_phy_id = (id1 >> 16) << 10;
14592 eeprom_phy_id |= (id2 & 0xfc00) << 16;
14593 eeprom_phy_id |= (id2 & 0x03ff) << 0;
14594 } else
14595 eeprom_phy_id = 0;
14596
14597 tp->phy_id = eeprom_phy_id;
14598 if (eeprom_phy_serdes) {
14599 if (!tg3_flag(tp, 5705_PLUS))
14600 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14601 else
14602 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14603 }
14604
14605 if (tg3_flag(tp, 5750_PLUS))
14606 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14607 SHASTA_EXT_LED_MODE_MASK);
14608 else
14609 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14610
14611 switch (led_cfg) {
14612 default:
14613 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14614 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14615 break;
14616
14617 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14618 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14619 break;
14620
14621 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14622 tp->led_ctrl = LED_CTRL_MODE_MAC;
14623
14624 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
14625 * read from some older 5700/5701 bootcode.
14626 */
14627 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
14628 tg3_asic_rev(tp) == ASIC_REV_5701)
14629 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14630
14631 break;
14632
14633 case SHASTA_EXT_LED_SHARED:
14634 tp->led_ctrl = LED_CTRL_MODE_SHARED;
14635 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
14636 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
14637 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14638 LED_CTRL_MODE_PHY_2);
14639 break;
14640
14641 case SHASTA_EXT_LED_MAC:
14642 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14643 break;
14644
14645 case SHASTA_EXT_LED_COMBO:
14646 tp->led_ctrl = LED_CTRL_MODE_COMBO;
14647 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
14648 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14649 LED_CTRL_MODE_PHY_2);
14650 break;
14651
14652 }
14653
14654 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
14655 tg3_asic_rev(tp) == ASIC_REV_5701) &&
14656 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14657 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14658
14659 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
14660 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14661
14662 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14663 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14664 if ((tp->pdev->subsystem_vendor ==
14665 PCI_VENDOR_ID_ARIMA) &&
14666 (tp->pdev->subsystem_device == 0x205a ||
14667 tp->pdev->subsystem_device == 0x2063))
14668 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14669 } else {
14670 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14671 tg3_flag_set(tp, IS_NIC);
14672 }
14673
14674 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14675 tg3_flag_set(tp, ENABLE_ASF);
14676 if (tg3_flag(tp, 5750_PLUS))
14677 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
14678 }
14679
14680 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14681 tg3_flag(tp, 5750_PLUS))
14682 tg3_flag_set(tp, ENABLE_APE);
14683
14684 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14685 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14686 tg3_flag_clear(tp, WOL_CAP);
14687
14688 if (tg3_flag(tp, WOL_CAP) &&
14689 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14690 tg3_flag_set(tp, WOL_ENABLE);
14691 device_set_wakeup_enable(&tp->pdev->dev, true);
14692 }
14693
14694 if (cfg2 & (1 << 17))
14695 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
14696
14697 /* SerDes signal pre-emphasis in register 0x590 is set by the
14698 * bootcode if bit 18 is set. */
14699 if (cfg2 & (1 << 18))
14700 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
14701
14702 if ((tg3_flag(tp, 57765_PLUS) ||
14703 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
14704 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
14705 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
14706 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
14707
14708 if (tg3_flag(tp, PCI_EXPRESS)) {
14709 u32 cfg3;
14710
14711 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
14712 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
14713 !tg3_flag(tp, 57765_PLUS) &&
14714 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
14715 tg3_flag_set(tp, ASPM_WORKAROUND);
14716 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
14717 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
14718 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
14719 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
14720 }
14721
14722 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
14723 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
14724 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
14725 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
14726 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
14727 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
14728 }
14729 done:
14730 if (tg3_flag(tp, WOL_CAP))
14731 device_set_wakeup_enable(&tp->pdev->dev,
14732 tg3_flag(tp, WOL_ENABLE));
14733 else
14734 device_set_wakeup_capable(&tp->pdev->dev, false);
14735 }
14736
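/* Read one word from the chip's OTP region through the APE: issue a
* read command and poll up to ~1ms for completion.
*/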
14737 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
14738 {
14739 int i, err;
14740 u32 val2, off = offset * 8;
14741
14742 err = tg3_nvram_lock(tp);
14743 if (err)
14744 return err;
14745
14746 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
14747 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
14748 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
14749 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
14750 udelay(10);
14751
14752 for (i = 0; i < 100; i++) {
14753 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
14754 if (val2 & APE_OTP_STATUS_CMD_DONE) {
14755 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
14756 break;
14757 }
14758 udelay(10);
14759 }
14760
14761 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
14762
14763 tg3_nvram_unlock(tp);
14764 if (val2 & APE_OTP_STATUS_CMD_DONE)
14765 return 0;
14766
14767 return -EBUSY;
14768 }
14769
14770 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14771 {
14772 int i;
14773 u32 val;
14774
14775 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14776 tw32(OTP_CTRL, cmd);
14777
14778 /* Wait for up to 1 ms for command to execute. */
14779 for (i = 0; i < 100; i++) {
14780 val = tr32(OTP_STATUS);
14781 if (val & OTP_STATUS_CMD_DONE)
14782 break;
14783 udelay(10);
14784 }
14785
14786 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14787 }
14788
14789 /* Read the gphy configuration from the OTP region of the chip. The gphy
14790 * configuration is a 32-bit value that straddles the alignment boundary.
14791 * We do two 32-bit reads and then shift and merge the results.
14792 */
14793 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
14794 {
14795 u32 bhalf_otp, thalf_otp;
14796
14797 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
14798
14799 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
14800 return 0;
14801
14802 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
14803
14804 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14805 return 0;
14806
14807 thalf_otp = tr32(OTP_READ_DATA);
14808
14809 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
14810
14811 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14812 return 0;
14813
14814 bhalf_otp = tr32(OTP_READ_DATA);
14815
14816 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
14817 }
14818
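/* Set the default link configuration: advertise every mode the PHY
* supports and enable autonegotiation.
*/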
14819 static void tg3_phy_init_link_config(struct tg3 *tp)
14820 {
14821 u32 adv = ADVERTISED_Autoneg;
14822
14823 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14824 adv |= ADVERTISED_1000baseT_Half |
14825 ADVERTISED_1000baseT_Full;
14826
14827 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14828 adv |= ADVERTISED_100baseT_Half |
14829 ADVERTISED_100baseT_Full |
14830 ADVERTISED_10baseT_Half |
14831 ADVERTISED_10baseT_Full |
14832 ADVERTISED_TP;
14833 else
14834 adv |= ADVERTISED_FIBRE;
14835
14836 tp->link_config.advertising = adv;
14837 tp->link_config.speed = SPEED_UNKNOWN;
14838 tp->link_config.duplex = DUPLEX_UNKNOWN;
14839 tp->link_config.autoneg = AUTONEG_ENABLE;
14840 tp->link_config.active_speed = SPEED_UNKNOWN;
14841 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
14842
14843 tp->old_link = -1;
14844 }
14845
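/* Identify the PHY. The ID registers are only read when no firmware
* agent (ASF/APE) owns the PHY; otherwise the eeprom value or the
* subsystem-ID table is used.
*/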
14846 static int tg3_phy_probe(struct tg3 *tp)
14847 {
14848 u32 hw_phy_id_1, hw_phy_id_2;
14849 u32 hw_phy_id, hw_phy_id_masked;
14850 int err;
14851
14852 /* flow control autonegotiation is default behavior */
14853 tg3_flag_set(tp, PAUSE_AUTONEG);
14854 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14855
14856 if (tg3_flag(tp, ENABLE_APE)) {
14857 switch (tp->pci_fn) {
14858 case 0:
14859 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
14860 break;
14861 case 1:
14862 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
14863 break;
14864 case 2:
14865 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
14866 break;
14867 case 3:
14868 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
14869 break;
14870 }
14871 }
14872
14873 if (!tg3_flag(tp, ENABLE_ASF) &&
14874 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14875 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14876 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
14877 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
14878
14879 if (tg3_flag(tp, USE_PHYLIB))
14880 return tg3_phy_init(tp);
14881
14882 /* Reading the PHY ID register can conflict with ASF
14883 * firmware access to the PHY hardware.
14884 */
14885 err = 0;
14886 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
14887 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
14888 } else {
14889 /* Now read the physical PHY_ID from the chip and verify
14890 * that it is sane. If it doesn't look good, we fall back
14891 * to the PHY_ID already found in the eeprom area and,
14892 * failing that, the hard-coded subsystem-ID table.
14893 */
14894 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
14895 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
14896
14897 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
14898 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
14899 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
14900
14901 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
14902 }
14903
14904 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
14905 tp->phy_id = hw_phy_id;
14906 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
14907 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14908 else
14909 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
14910 } else {
14911 if (tp->phy_id != TG3_PHY_ID_INVALID) {
14912 /* Do nothing, phy ID already set up in
14913 * tg3_get_eeprom_hw_cfg().
14914 */
14915 } else {
14916 struct subsys_tbl_ent *p;
14917
14918 /* No eeprom signature? Try the hardcoded
14919 * subsys device table.
14920 */
14921 p = tg3_lookup_by_subsys(tp);
14922 if (p) {
14923 tp->phy_id = p->phy_id;
14924 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
14925 /* So far we have seen the IDs 0xbc050cd0,
14926 * 0xbc050f80 and 0xbc050c30 on devices
14927 * connected to a BCM4785, and there are
14928 * probably more. For now, just assume that
14929 * the phy is supported when it is connected
14930 * to an SSB core.
14931 */
14932 return -ENODEV;
14933 }
14934
14935 if (!tp->phy_id ||
14936 tp->phy_id == TG3_PHY_ID_BCM8002)
14937 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14938 }
14939 }
14940
14941 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14942 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
14943 tg3_asic_rev(tp) == ASIC_REV_5720 ||
14944 tg3_asic_rev(tp) == ASIC_REV_57766 ||
14945 tg3_asic_rev(tp) == ASIC_REV_5762 ||
14946 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
14947 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
14948 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
14949 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
14950 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
14951
14952 tg3_phy_init_link_config(tp);
14953
14954 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
14955 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14956 !tg3_flag(tp, ENABLE_APE) &&
14957 !tg3_flag(tp, ENABLE_ASF)) {
14958 u32 bmsr, dummy;
14959
14960 tg3_readphy(tp, MII_BMSR, &bmsr);
14961 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
14962 (bmsr & BMSR_LSTATUS))
14963 goto skip_phy_reset;
14964
14965 err = tg3_phy_reset(tp);
14966 if (err)
14967 return err;
14968
14969 tg3_phy_set_wirespeed(tp);
14970
14971 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
14972 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
14973 tp->link_config.flowctrl);
14974
14975 tg3_writephy(tp, MII_BMCR,
14976 BMCR_ANENABLE | BMCR_ANRESTART);
14977 }
14978 }
14979
14980 skip_phy_reset:
14981 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
14982 err = tg3_init_5401phy_dsp(tp);
14983 if (err)
14984 return err;
14985
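/* The DSP init is deliberately applied a second time; a single
* pass does not always take on the 5401 PHY.
*/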
14986 err = tg3_init_5401phy_dsp(tp);
14987 }
14988
14989 return err;
14990 }
14991
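/* Parse the PCI VPD read-only section for the board part number and,
* on boards whose MFR_ID reads "1028" (Dell), a vendor firmware
* version string.
*/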
14992 static void tg3_read_vpd(struct tg3 *tp)
14993 {
14994 u8 *vpd_data;
14995 unsigned int block_end, rosize, len;
14996 u32 vpdlen;
14997 int j, i = 0;
14998
14999 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15000 if (!vpd_data)
15001 goto out_no_vpd;
15002
15003 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15004 if (i < 0)
15005 goto out_not_found;
15006
15007 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15008 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15009 i += PCI_VPD_LRDT_TAG_SIZE;
15010
15011 if (block_end > vpdlen)
15012 goto out_not_found;
15013
15014 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15015 PCI_VPD_RO_KEYWORD_MFR_ID);
15016 if (j > 0) {
15017 len = pci_vpd_info_field_size(&vpd_data[j]);
15018
15019 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15020 if (j + len > block_end || len != 4 ||
15021 memcmp(&vpd_data[j], "1028", 4))
15022 goto partno;
15023
15024 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15025 PCI_VPD_RO_KEYWORD_VENDOR0);
15026 if (j < 0)
15027 goto partno;
15028
15029 len = pci_vpd_info_field_size(&vpd_data[j]);
15030
15031 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15032 if (j + len > block_end)
15033 goto partno;
15034
15035 if (len >= sizeof(tp->fw_ver))
15036 len = sizeof(tp->fw_ver) - 1;
15037 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15038 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15039 &vpd_data[j]);
15040 }
15041
15042 partno:
15043 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15044 PCI_VPD_RO_KEYWORD_PARTNO);
15045 if (i < 0)
15046 goto out_not_found;
15047
15048 len = pci_vpd_info_field_size(&vpd_data[i]);
15049
15050 i += PCI_VPD_INFO_FLD_HDR_SIZE;
15051 if (len > TG3_BPN_SIZE ||
15052 (len + i) > vpdlen)
15053 goto out_not_found;
15054
15055 memcpy(tp->board_part_number, &vpd_data[i], len);
15056
15057 out_not_found:
15058 kfree(vpd_data);
15059 if (tp->board_part_number[0])
15060 return;
15061
15062 out_no_vpd:
15063 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15064 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15065 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15066 strcpy(tp->board_part_number, "BCM5717");
15067 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15068 strcpy(tp->board_part_number, "BCM5718");
15069 else
15070 goto nomatch;
15071 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15072 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15073 strcpy(tp->board_part_number, "BCM57780");
15074 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15075 strcpy(tp->board_part_number, "BCM57760");
15076 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15077 strcpy(tp->board_part_number, "BCM57790");
15078 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15079 strcpy(tp->board_part_number, "BCM57788");
15080 else
15081 goto nomatch;
15082 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15083 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15084 strcpy(tp->board_part_number, "BCM57761");
15085 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15086 strcpy(tp->board_part_number, "BCM57765");
15087 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15088 strcpy(tp->board_part_number, "BCM57781");
15089 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15090 strcpy(tp->board_part_number, "BCM57785");
15091 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15092 strcpy(tp->board_part_number, "BCM57791");
15093 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15094 strcpy(tp->board_part_number, "BCM57795");
15095 else
15096 goto nomatch;
15097 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15098 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15099 strcpy(tp->board_part_number, "BCM57762");
15100 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15101 strcpy(tp->board_part_number, "BCM57766");
15102 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15103 strcpy(tp->board_part_number, "BCM57782");
15104 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15105 strcpy(tp->board_part_number, "BCM57786");
15106 else
15107 goto nomatch;
15108 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15109 strcpy(tp->board_part_number, "BCM95906");
15110 } else {
15111 nomatch:
15112 strcpy(tp->board_part_number, "none");
15113 }
15114 }
15115
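/* A firmware image is considered valid if its first word matches the
* 0x0c000000 signature pattern and the following word is zero.
*/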
15116 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15117 {
15118 u32 val;
15119
15120 if (tg3_nvram_read(tp, offset, &val) ||
15121 (val & 0xfc000000) != 0x0c000000 ||
15122 tg3_nvram_read(tp, offset + 4, &val) ||
15123 val != 0)
15124 return 0;
15125
15126 return 1;
15127 }
15128
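/* Append the bootcode version to fw_ver. Newer images embed a version
* string; older ones only provide major/minor fields.
*/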
15129 static void tg3_read_bc_ver(struct tg3 *tp)
15130 {
15131 u32 val, offset, start, ver_offset;
15132 int i, dst_off;
15133 bool newver = false;
15134
15135 if (tg3_nvram_read(tp, 0xc, &offset) ||
15136 tg3_nvram_read(tp, 0x4, &start))
15137 return;
15138
15139 offset = tg3_nvram_logical_addr(tp, offset);
15140
15141 if (tg3_nvram_read(tp, offset, &val))
15142 return;
15143
15144 if ((val & 0xfc000000) == 0x0c000000) {
15145 if (tg3_nvram_read(tp, offset + 4, &val))
15146 return;
15147
15148 if (val == 0)
15149 newver = true;
15150 }
15151
15152 dst_off = strlen(tp->fw_ver);
15153
15154 if (newver) {
15155 if (TG3_VER_SIZE - dst_off < 16 ||
15156 tg3_nvram_read(tp, offset + 8, &ver_offset))
15157 return;
15158
15159 offset = offset + ver_offset - start;
15160 for (i = 0; i < 16; i += 4) {
15161 __be32 v;
15162 if (tg3_nvram_read_be32(tp, offset + i, &v))
15163 return;
15164
15165 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15166 }
15167 } else {
15168 u32 major, minor;
15169
15170 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15171 return;
15172
15173 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15174 TG3_NVM_BCVER_MAJSFT;
15175 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15176 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15177 "v%d.%02d", major, minor);
15178 }
15179 }
15180
15181 static void tg3_read_hwsb_ver(struct tg3 *tp)
15182 {
15183 u32 val, major, minor;
15184
15185 /* Use native endian representation */
15186 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15187 return;
15188
15189 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15190 TG3_NVM_HWSB_CFG1_MAJSFT;
15191 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15192 TG3_NVM_HWSB_CFG1_MINSFT;
15193
15194 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15195 }
15196
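/* Decode the selfboot image version; the format revision determines
* where the encoded version word lives.
*/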
15197 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15198 {
15199 u32 offset, major, minor, build;
15200
15201 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15202
15203 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15204 return;
15205
15206 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15207 case TG3_EEPROM_SB_REVISION_0:
15208 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15209 break;
15210 case TG3_EEPROM_SB_REVISION_2:
15211 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15212 break;
15213 case TG3_EEPROM_SB_REVISION_3:
15214 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15215 break;
15216 case TG3_EEPROM_SB_REVISION_4:
15217 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15218 break;
15219 case TG3_EEPROM_SB_REVISION_5:
15220 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15221 break;
15222 case TG3_EEPROM_SB_REVISION_6:
15223 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15224 break;
15225 default:
15226 return;
15227 }
15228
15229 if (tg3_nvram_read(tp, offset, &val))
15230 return;
15231
15232 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15233 TG3_EEPROM_SB_EDH_BLD_SHFT;
15234 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15235 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15236 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
15237
15238 if (minor > 99 || build > 26)
15239 return;
15240
15241 offset = strlen(tp->fw_ver);
15242 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15243 " v%d.%02d", major, minor);
15244
15245 if (build > 0) {
15246 offset = strlen(tp->fw_ver);
15247 if (offset < TG3_VER_SIZE - 1)
15248 tp->fw_ver[offset] = 'a' + build - 1;
15249 }
15250 }
15251
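/* Walk the NVRAM directory for the ASF management firmware image and
* append its version string to fw_ver.
*/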
15252 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15253 {
15254 u32 val, offset, start;
15255 int i, vlen;
15256
15257 for (offset = TG3_NVM_DIR_START;
15258 offset < TG3_NVM_DIR_END;
15259 offset += TG3_NVM_DIRENT_SIZE) {
15260 if (tg3_nvram_read(tp, offset, &val))
15261 return;
15262
15263 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15264 break;
15265 }
15266
15267 if (offset == TG3_NVM_DIR_END)
15268 return;
15269
15270 if (!tg3_flag(tp, 5705_PLUS))
15271 start = 0x08000000;
15272 else if (tg3_nvram_read(tp, offset - 4, &start))
15273 return;
15274
15275 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15276 !tg3_fw_img_is_valid(tp, offset) ||
15277 tg3_nvram_read(tp, offset + 8, &val))
15278 return;
15279
15280 offset += val - start;
15281
15282 vlen = strlen(tp->fw_ver);
15283
15284 tp->fw_ver[vlen++] = ',';
15285 tp->fw_ver[vlen++] = ' ';
15286
15287 for (i = 0; i < 4; i++) {
15288 __be32 v;
15289 if (tg3_nvram_read_be32(tp, offset, &v))
15290 return;
15291
15292 offset += sizeof(v);
15293
15294 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15295 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15296 break;
15297 }
15298
15299 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15300 vlen += sizeof(v);
15301 }
15302 }
15303
15304 static void tg3_probe_ncsi(struct tg3 *tp)
15305 {
15306 u32 apedata;
15307
15308 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15309 if (apedata != APE_SEG_SIG_MAGIC)
15310 return;
15311
15312 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15313 if (!(apedata & APE_FW_STATUS_READY))
15314 return;
15315
15316 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15317 tg3_flag_set(tp, APE_HAS_NCSI);
15318 }
15319
15320 static void tg3_read_dash_ver(struct tg3 *tp)
15321 {
15322 int vlen;
15323 u32 apedata;
15324 char *fwtype;
15325
15326 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15327
15328 if (tg3_flag(tp, APE_HAS_NCSI))
15329 fwtype = "NCSI";
15330 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15331 fwtype = "SMASH";
15332 else
15333 fwtype = "DASH";
15334
15335 vlen = strlen(tp->fw_ver);
15336
15337 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15338 fwtype,
15339 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15340 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15341 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15342 (apedata & APE_FW_VERSION_BLDMSK));
15343 }
15344
15345 static void tg3_read_otp_ver(struct tg3 *tp)
15346 {
15347 u32 val, val2;
15348
15349 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15350 return;
15351
15352 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15353 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15354 TG3_OTP_MAGIC0_VALID(val)) {
15355 u64 val64 = (u64) val << 32 | val2;
15356 u32 ver = 0;
15357 int i, vlen;
15358
15359 for (i = 0; i < 7; i++) {
15360 if ((val64 & 0xff) == 0)
15361 break;
15362 ver = val64 & 0xff;
15363 val64 >>= 8;
15364 }
15365 vlen = strlen(tp->fw_ver);
15366 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15367 }
15368 }
15369
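/* Build the composite firmware version string: bootcode or selfboot
* version first, then any ASF/APE firmware version.
*/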
15370 static void tg3_read_fw_ver(struct tg3 *tp)
15371 {
15372 u32 val;
15373 bool vpd_vers = false;
15374
15375 if (tp->fw_ver[0] != 0)
15376 vpd_vers = true;
15377
15378 if (tg3_flag(tp, NO_NVRAM)) {
15379 strcat(tp->fw_ver, "sb");
15380 tg3_read_otp_ver(tp);
15381 return;
15382 }
15383
15384 if (tg3_nvram_read(tp, 0, &val))
15385 return;
15386
15387 if (val == TG3_EEPROM_MAGIC)
15388 tg3_read_bc_ver(tp);
15389 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15390 tg3_read_sb_ver(tp, val);
15391 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15392 tg3_read_hwsb_ver(tp);
15393
15394 if (tg3_flag(tp, ENABLE_ASF)) {
15395 if (tg3_flag(tp, ENABLE_APE)) {
15396 tg3_probe_ncsi(tp);
15397 if (!vpd_vers)
15398 tg3_read_dash_ver(tp);
15399 } else if (!vpd_vers) {
15400 tg3_read_mgmtfw_ver(tp);
15401 }
15402 }
15403
15404 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15405 }
15406
15407 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15408 {
15409 if (tg3_flag(tp, LRG_PROD_RING_CAP))
15410 return TG3_RX_RET_MAX_SIZE_5717;
15411 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15412 return TG3_RX_RET_MAX_SIZE_5700;
15413 else
15414 return TG3_RX_RET_MAX_SIZE_5705;
15415 }
15416
15417 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15418 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15419 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15420 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15421 { },
15422 };
15423
15424 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15425 {
15426 struct pci_dev *peer;
15427 unsigned int func, devnr = tp->pdev->devfn & ~7;
15428
15429 for (func = 0; func < 8; func++) {
15430 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15431 if (peer && peer != tp->pdev)
15432 break;
15433 pci_dev_put(peer);
15434 }
15435 /* 5704 can be configured in single-port mode, set peer to
15436 * tp->pdev in that case.
15437 */
15438 if (!peer) {
15439 peer = tp->pdev;
15440 return peer;
15441 }
15442
15443 /*
15444 * We don't need to keep the refcount elevated; there's no way
15445 * to remove one half of this device without removing the other
15446 */
15447 pci_dev_put(peer);
15448
15449 return peer;
15450 }
15451
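/* Determine the chip revision ID, which newer devices report through a
* product-ID config register, and derive the chip-family flags used
* throughout the driver.
*/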
15452 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15453 {
15454 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15455 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15456 u32 reg;
15457
15458 /* All devices that use the alternate
15459 * ASIC REV location have a CPMU.
15460 */
15461 tg3_flag_set(tp, CPMU_PRESENT);
15462
15463 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15464 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15465 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15466 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15467 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15468 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15469 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15470 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
15471 reg = TG3PCI_GEN2_PRODID_ASICREV;
15472 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15473 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15474 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15475 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15476 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15477 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15478 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15479 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15480 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15481 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15482 reg = TG3PCI_GEN15_PRODID_ASICREV;
15483 else
15484 reg = TG3PCI_PRODID_ASICREV;
15485
15486 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15487 }
15488
15489 /* Wrong chip ID in 5752 A0. This code can be removed later
15490 * as A0 is not in production.
15491 */
15492 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15493 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15494
15495 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15496 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15497
15498 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15499 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15500 tg3_asic_rev(tp) == ASIC_REV_5720)
15501 tg3_flag_set(tp, 5717_PLUS);
15502
15503 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15504 tg3_asic_rev(tp) == ASIC_REV_57766)
15505 tg3_flag_set(tp, 57765_CLASS);
15506
15507 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15508 tg3_asic_rev(tp) == ASIC_REV_5762)
15509 tg3_flag_set(tp, 57765_PLUS);
15510
15511 /* Intentionally exclude ASIC_REV_5906 */
15512 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15513 tg3_asic_rev(tp) == ASIC_REV_5787 ||
15514 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15515 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15516 tg3_asic_rev(tp) == ASIC_REV_5785 ||
15517 tg3_asic_rev(tp) == ASIC_REV_57780 ||
15518 tg3_flag(tp, 57765_PLUS))
15519 tg3_flag_set(tp, 5755_PLUS);
15520
15521 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15522 tg3_asic_rev(tp) == ASIC_REV_5714)
15523 tg3_flag_set(tp, 5780_CLASS);
15524
15525 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15526 tg3_asic_rev(tp) == ASIC_REV_5752 ||
15527 tg3_asic_rev(tp) == ASIC_REV_5906 ||
15528 tg3_flag(tp, 5755_PLUS) ||
15529 tg3_flag(tp, 5780_CLASS))
15530 tg3_flag_set(tp, 5750_PLUS);
15531
15532 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
15533 tg3_flag(tp, 5750_PLUS))
15534 tg3_flag_set(tp, 5705_PLUS);
15535 }
15536
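/* Identify 10/100-only devices: FET PHYs, certain 5703 board straps,
* and entries flagged as such in the PCI device table.
*/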
15537 static bool tg3_10_100_only_device(struct tg3 *tp,
15538 const struct pci_device_id *ent)
15539 {
15540 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15541
15542 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15543 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15544 (tp->phy_flags & TG3_PHYFLG_IS_FET))
15545 return true;
15546
15547 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15548 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15549 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15550 return true;
15551 } else {
15552 return true;
15553 }
15554 }
15555
15556 return false;
15557 }
15558
15559 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15560 {
15561 u32 misc_ctrl_reg;
15562 u32 pci_state_reg, grc_misc_cfg;
15563 u32 val;
15564 u16 pci_cmd;
15565 int err;
15566
15567 /* Force memory write invalidate off. If we leave it on,
15568 * then on 5700_BX chips we have to enable a workaround.
15569 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15570 * to match the cacheline size. The Broadcom driver has this
15571 * workaround but turns MWI off all the time, so it is never
15572 * used. This seems to suggest that the workaround is insufficient.
15573 */
15574 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15575 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15576 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15577
15578 /* Important! -- Make sure register accesses are byteswapped
15579 * correctly. Also, for those chips that require it, make
15580 * sure that indirect register accesses are enabled before
15581 * the first operation.
15582 */
15583 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15584 &misc_ctrl_reg);
15585 tp->misc_host_ctrl |= (misc_ctrl_reg &
15586 MISC_HOST_CTRL_CHIPREV);
15587 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15588 tp->misc_host_ctrl);
15589
15590 tg3_detect_asic_rev(tp, misc_ctrl_reg);
15591
15592 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15593 * we need to disable memory and use config. cycles
15594 * only to access all registers. The 5702/03 chips
15595 * can mistakenly decode the special cycles from the
15596 * ICH chipsets as memory write cycles, causing corruption
15597 * of register and memory space. Only certain ICH bridges
15598 * will drive special cycles with non-zero data during the
15599 * address phase which can fall within the 5703's address
15600 * range. This is not an ICH bug as the PCI spec allows
15601 * non-zero address during special cycles. However, only
15602 * these ICH bridges are known to drive non-zero addresses
15603 * during special cycles.
15604 *
15605 * Since special cycles do not cross PCI bridges, we only
15606 * enable this workaround if the 5703 is on the secondary
15607 * bus of these ICH bridges.
15608 */
15609 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15610 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15611 static struct tg3_dev_id {
15612 u32 vendor;
15613 u32 device;
15614 u32 rev;
15615 } ich_chipsets[] = {
15616 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15617 PCI_ANY_ID },
15618 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15619 PCI_ANY_ID },
15620 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15621 0xa },
15622 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15623 PCI_ANY_ID },
15624 { },
15625 };
15626 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15627 struct pci_dev *bridge = NULL;
15628
15629 while (pci_id->vendor != 0) {
15630 bridge = pci_get_device(pci_id->vendor, pci_id->device,
15631 bridge);
15632 if (!bridge) {
15633 pci_id++;
15634 continue;
15635 }
15636 if (pci_id->rev != PCI_ANY_ID) {
15637 if (bridge->revision > pci_id->rev)
15638 continue;
15639 }
15640 if (bridge->subordinate &&
15641 (bridge->subordinate->number ==
15642 tp->pdev->bus->number)) {
15643 tg3_flag_set(tp, ICH_WORKAROUND);
15644 pci_dev_put(bridge);
15645 break;
15646 }
15647 }
15648 }
15649
15650 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15651 static struct tg3_dev_id {
15652 u32 vendor;
15653 u32 device;
15654 } bridge_chipsets[] = {
15655 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15656 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15657 { },
15658 };
15659 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15660 struct pci_dev *bridge = NULL;
15661
15662 while (pci_id->vendor != 0) {
15663 bridge = pci_get_device(pci_id->vendor,
15664 pci_id->device,
15665 bridge);
15666 if (!bridge) {
15667 pci_id++;
15668 continue;
15669 }
15670 if (bridge->subordinate &&
15671 (bridge->subordinate->number <=
15672 tp->pdev->bus->number) &&
15673 (bridge->subordinate->busn_res.end >=
15674 tp->pdev->bus->number)) {
15675 tg3_flag_set(tp, 5701_DMA_BUG);
15676 pci_dev_put(bridge);
15677 break;
15678 }
15679 }
15680 }
15681
15682 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15683 * DMA addresses > 40-bit. This bridge may have additional
15684 * 57xx devices behind it in some 4-port NIC designs, for example.
15685 * Any tg3 device found behind the bridge will also need the 40-bit
15686 * DMA workaround.
15687 */
15688 if (tg3_flag(tp, 5780_CLASS)) {
15689 tg3_flag_set(tp, 40BIT_DMA_BUG);
15690 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15691 } else {
15692 struct pci_dev *bridge = NULL;
15693
15694 do {
15695 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15696 PCI_DEVICE_ID_SERVERWORKS_EPB,
15697 bridge);
15698 if (bridge && bridge->subordinate &&
15699 (bridge->subordinate->number <=
15700 tp->pdev->bus->number) &&
15701 (bridge->subordinate->busn_res.end >=
15702 tp->pdev->bus->number)) {
15703 tg3_flag_set(tp, 40BIT_DMA_BUG);
15704 pci_dev_put(bridge);
15705 break;
15706 }
15707 } while (bridge);
15708 }
15709
15710 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15711 tg3_asic_rev(tp) == ASIC_REV_5714)
15712 tp->pdev_peer = tg3_find_peer(tp);
15713
15714 /* Determine TSO capabilities */
15715 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
15716 ; /* Do nothing. HW bug. */
15717 else if (tg3_flag(tp, 57765_PLUS))
15718 tg3_flag_set(tp, HW_TSO_3);
15719 else if (tg3_flag(tp, 5755_PLUS) ||
15720 tg3_asic_rev(tp) == ASIC_REV_5906)
15721 tg3_flag_set(tp, HW_TSO_2);
15722 else if (tg3_flag(tp, 5750_PLUS)) {
15723 tg3_flag_set(tp, HW_TSO_1);
15724 tg3_flag_set(tp, TSO_BUG);
15725 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
15726 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
15727 tg3_flag_clear(tp, TSO_BUG);
15728 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15729 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15730 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
15731 tg3_flag_set(tp, FW_TSO);
15732 tg3_flag_set(tp, TSO_BUG);
15733 if (tg3_asic_rev(tp) == ASIC_REV_5705)
15734 tp->fw_needed = FIRMWARE_TG3TSO5;
15735 else
15736 tp->fw_needed = FIRMWARE_TG3TSO;
15737 }
15738
15739 /* Selectively allow TSO based on operating conditions */
15740 if (tg3_flag(tp, HW_TSO_1) ||
15741 tg3_flag(tp, HW_TSO_2) ||
15742 tg3_flag(tp, HW_TSO_3) ||
15743 tg3_flag(tp, FW_TSO)) {
15744 /* For firmware TSO, assume ASF is disabled.
15745 * We'll disable TSO later if we discover ASF
15746 * is enabled in tg3_get_eeprom_hw_cfg().
15747 */
15748 tg3_flag_set(tp, TSO_CAPABLE);
15749 } else {
15750 tg3_flag_clear(tp, TSO_CAPABLE);
15751 tg3_flag_clear(tp, TSO_BUG);
15752 tp->fw_needed = NULL;
15753 }
15754
15755 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
15756 tp->fw_needed = FIRMWARE_TG3;
15757
15758 if (tg3_asic_rev(tp) == ASIC_REV_57766)
15759 tp->fw_needed = FIRMWARE_TG357766;
15760
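/* Assume a single interrupt vector; MSI-X capable chips below
 * raise irq_max to TG3_IRQ_MAX_VECS.
 */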
15761 tp->irq_max = 1;
15762
15763 if (tg3_flag(tp, 5750_PLUS)) {
15764 tg3_flag_set(tp, SUPPORT_MSI);
15765 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
15766 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
15767 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
15768 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
15769 tp->pdev_peer == tp->pdev))
15770 tg3_flag_clear(tp, SUPPORT_MSI);
15771
15772 if (tg3_flag(tp, 5755_PLUS) ||
15773 tg3_asic_rev(tp) == ASIC_REV_5906) {
15774 tg3_flag_set(tp, 1SHOT_MSI);
15775 }
15776
15777 if (tg3_flag(tp, 57765_PLUS)) {
15778 tg3_flag_set(tp, SUPPORT_MSIX);
15779 tp->irq_max = TG3_IRQ_MAX_VECS;
15780 }
15781 }
15782
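/* Default to single TX/RX queues. With multiple MSI-X vectors,
 * RX can spread across up to TG3_RSS_MAX_NUM_QS RSS queues, and
 * only the 5719/5720 drive multiple TX queues.
 */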
15783 tp->txq_max = 1;
15784 tp->rxq_max = 1;
15785 if (tp->irq_max > 1) {
15786 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15787 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15788
15789 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15790 tg3_asic_rev(tp) == ASIC_REV_5720)
15791 tp->txq_max = tp->irq_max - 1;
15792 }
15793
15794 if (tg3_flag(tp, 5755_PLUS) ||
15795 tg3_asic_rev(tp) == ASIC_REV_5906)
15796 tg3_flag_set(tp, SHORT_DMA_BUG);
15797
15798 if (tg3_asic_rev(tp) == ASIC_REV_5719)
15799 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15800
15801 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15802 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15803 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15804 tg3_asic_rev(tp) == ASIC_REV_5762)
15805 tg3_flag_set(tp, LRG_PROD_RING_CAP);
15806
15807 if (tg3_flag(tp, 57765_PLUS) &&
15808 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
15809 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15810
15811 if (!tg3_flag(tp, 5705_PLUS) ||
15812 tg3_flag(tp, 5780_CLASS) ||
15813 tg3_flag(tp, USE_JUMBO_BDFLAG))
15814 tg3_flag_set(tp, JUMBO_CAPABLE);
15815
15816 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15817 &pci_state_reg);
15818
15819 if (pci_is_pcie(tp->pdev)) {
15820 u16 lnkctl;
15821
15822 tg3_flag_set(tp, PCI_EXPRESS);
15823
15824 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
15825 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
15826 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15827 tg3_flag_clear(tp, HW_TSO_2);
15828 tg3_flag_clear(tp, TSO_CAPABLE);
15829 }
15830 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
15831 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15832 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
15833 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
15834 tg3_flag_set(tp, CLKREQ_BUG);
15835 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
15836 tg3_flag_set(tp, L1PLLPD_EN);
15837 }
15838 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
15839 /* BCM5785 devices are effectively PCIe devices, and should
15840 * follow PCIe codepaths, but do not have a PCIe capabilities
15841 * section.
15842 */
15843 tg3_flag_set(tp, PCI_EXPRESS);
15844 } else if (!tg3_flag(tp, 5705_PLUS) ||
15845 tg3_flag(tp, 5780_CLASS)) {
15846 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15847 if (!tp->pcix_cap) {
15848 dev_err(&tp->pdev->dev,
15849 "Cannot find PCI-X capability, aborting\n");
15850 return -EIO;
15851 }
15852
15853 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
15854 tg3_flag_set(tp, PCIX_MODE);
15855 }
15856
15857 /* If we have an AMD 762 or VIA K8T800 chipset, write
15858 * reordering to the mailbox registers done by the host
15859 * controller can cause major trouble. We read back after
15860 * every mailbox register write to force the writes to be
15861 * posted to the chip in order.
15862 */
15863 if (pci_dev_present(tg3_write_reorder_chipsets) &&
15864 !tg3_flag(tp, PCI_EXPRESS))
15865 tg3_flag_set(tp, MBOX_WRITE_REORDER);
15866
15867 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15868 &tp->pci_cacheline_sz);
15869 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15870 &tp->pci_lat_timer);
15871 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
15872 tp->pci_lat_timer < 64) {
15873 tp->pci_lat_timer = 64;
15874 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15875 tp->pci_lat_timer);
15876 }
15877
15878 /* Important! -- It is critical that the PCI-X hw workaround
15879 * situation is decided before the first MMIO register access.
15880 */
15881 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
15882 /* 5700 BX chips need to have their TX producer index
15883 * mailboxes written twice to work around a bug.
15884 */
15885 tg3_flag_set(tp, TXD_MBOX_HWBUG);
15886
15887 /* If we are in PCI-X mode, enable register write workaround.
15888 *
15889 * The workaround is to use indirect register accesses
15890 * for all chip writes not to mailbox registers.
15891 */
15892 if (tg3_flag(tp, PCIX_MODE)) {
15893 u32 pm_reg;
15894
15895 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15896
15897 /* The chip can have its power management PCI config
15898 * space registers clobbered due to this bug.
15899 * So explicitly force the chip into D0 here.
15900 */
15901 pci_read_config_dword(tp->pdev,
15902 tp->pm_cap + PCI_PM_CTRL,
15903 &pm_reg);
15904 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
15905 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
15906 pci_write_config_dword(tp->pdev,
15907 tp->pm_cap + PCI_PM_CTRL,
15908 pm_reg);
15909
15910 /* Also, force SERR#/PERR# in PCI command. */
15911 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15912 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
15913 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15914 }
15915 }
15916
15917 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
15918 tg3_flag_set(tp, PCI_HIGH_SPEED);
15919 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
15920 tg3_flag_set(tp, PCI_32BIT);
15921
15922 /* Chip-specific fixup from Broadcom driver */
15923 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
15924 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
15925 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
15926 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
15927 }
15928
15929 /* Default fast path register access methods */
15930 tp->read32 = tg3_read32;
15931 tp->write32 = tg3_write32;
15932 tp->read32_mbox = tg3_read32;
15933 tp->write32_mbox = tg3_write32;
15934 tp->write32_tx_mbox = tg3_write32;
15935 tp->write32_rx_mbox = tg3_write32;
15936
15937 /* Various workaround register access methods */
15938 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
15939 tp->write32 = tg3_write_indirect_reg32;
15940 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
15941 (tg3_flag(tp, PCI_EXPRESS) &&
15942 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
15943 /*
15944 * Back to back register writes can cause problems on these
15945 * chips, the workaround is to read back all reg writes
15946 * except those to mailbox regs.
15947 *
15948 * See tg3_write_indirect_reg32().
15949 */
15950 tp->write32 = tg3_write_flush_reg32;
15951 }
15952
15953 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
15954 tp->write32_tx_mbox = tg3_write32_tx_mbox;
15955 if (tg3_flag(tp, MBOX_WRITE_REORDER))
15956 tp->write32_rx_mbox = tg3_write_flush_reg32;
15957 }
15958
15959 if (tg3_flag(tp, ICH_WORKAROUND)) {
15960 tp->read32 = tg3_read_indirect_reg32;
15961 tp->write32 = tg3_write_indirect_reg32;
15962 tp->read32_mbox = tg3_read_indirect_mbox;
15963 tp->write32_mbox = tg3_write_indirect_mbox;
15964 tp->write32_tx_mbox = tg3_write_indirect_mbox;
15965 tp->write32_rx_mbox = tg3_write_indirect_mbox;
15966
15967 iounmap(tp->regs);
15968 tp->regs = NULL;
15969
15970 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15971 pci_cmd &= ~PCI_COMMAND_MEMORY;
15972 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15973 }
15974 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15975 tp->read32_mbox = tg3_read32_mbox_5906;
15976 tp->write32_mbox = tg3_write32_mbox_5906;
15977 tp->write32_tx_mbox = tg3_write32_mbox_5906;
15978 tp->write32_rx_mbox = tg3_write32_mbox_5906;
15979 }
15980
15981 if (tp->write32 == tg3_write_indirect_reg32 ||
15982 (tg3_flag(tp, PCIX_MODE) &&
15983 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15984 tg3_asic_rev(tp) == ASIC_REV_5701)))
15985 tg3_flag_set(tp, SRAM_USE_CONFIG);
15986
15987 /* The memory arbiter has to be enabled in order for SRAM accesses
15988 * to succeed. Normally on powerup the tg3 chip firmware will make
15989 * sure it is enabled, but other entities such as system netboot
15990 * code might disable it.
15991 */
15992 val = tr32(MEMARB_MODE);
15993 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
15994
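/* Work out which PCI function this port is. The devfn-derived
 * value can be wrong on multi-function parts, so it is overridden
 * below from the PCI-X status or CPMU status registers.
 */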
15995 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
15996 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15997 tg3_flag(tp, 5780_CLASS)) {
15998 if (tg3_flag(tp, PCIX_MODE)) {
15999 pci_read_config_dword(tp->pdev,
16000 tp->pcix_cap + PCI_X_STATUS,
16001 &val);
16002 tp->pci_fn = val & 0x7;
16003 }
16004 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16005 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16006 tg3_asic_rev(tp) == ASIC_REV_5720) {
16007 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16008 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16009 val = tr32(TG3_CPMU_STATUS);
16010
16011 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16012 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16013 else
16014 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16015 TG3_CPMU_STATUS_FSHFT_5719;
16016 }
16017
16018 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16019 tp->write32_tx_mbox = tg3_write_flush_reg32;
16020 tp->write32_rx_mbox = tg3_write_flush_reg32;
16021 }
16022
16023 /* Get eeprom hw config before calling tg3_set_power_state().
16024 * In particular, the TG3_FLAG_IS_NIC flag must be
16025 * determined before calling tg3_set_power_state() so that
16026 * we know whether or not to switch out of Vaux power.
16027 * When the flag is set, it means that GPIO1 is used for eeprom
16028 * write protect and also implies that it is a LOM where GPIOs
16029 * are not used to switch power.
16030 */
16031 tg3_get_eeprom_hw_cfg(tp);
16032
16033 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16034 tg3_flag_clear(tp, TSO_CAPABLE);
16035 tg3_flag_clear(tp, TSO_BUG);
16036 tp->fw_needed = NULL;
16037 }
16038
16039 if (tg3_flag(tp, ENABLE_APE)) {
16040 /* Allow reads and writes to the
16041 * APE register and memory space.
16042 */
16043 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16044 PCISTATE_ALLOW_APE_SHMEM_WR |
16045 PCISTATE_ALLOW_APE_PSPACE_WR;
16046 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16047 pci_state_reg);
16048
16049 tg3_ape_lock_init(tp);
16050 }
16051
16052 /* Set up tp->grc_local_ctrl before calling
16053 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16054 * will bring 5700's external PHY out of reset.
16055 * It is also used as eeprom write protect on LOMs.
16056 */
16057 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16058 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16059 tg3_flag(tp, EEPROM_WRITE_PROT))
16060 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16061 GRC_LCLCTRL_GPIO_OUTPUT1);
16062 /* Unused GPIO3 must be driven as output on 5752 because there
16063 * are no pull-up resistors on unused GPIO pins.
16064 */
16065 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16066 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16067
16068 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16069 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16070 tg3_flag(tp, 57765_CLASS))
16071 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16072
16073 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16074 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16075 /* Turn off the debug UART. */
16076 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16077 if (tg3_flag(tp, IS_NIC))
16078 /* Keep VMain power. */
16079 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16080 GRC_LCLCTRL_GPIO_OUTPUT0;
16081 }
16082
16083 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16084 tp->grc_local_ctrl |=
16085 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16086
16087 /* Switch out of Vaux if it is a NIC */
16088 tg3_pwrsrc_switch_to_vmain(tp);
16089
16090 /* Derive initial jumbo mode from MTU assigned in
16091 * ether_setup() via the alloc_etherdev() call
16092 */
16093 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16094 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16095
16096 /* Determine WakeOnLan speed to use. */
16097 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16098 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16099 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16100 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16101 tg3_flag_clear(tp, WOL_SPEED_100MB);
16102 } else {
16103 tg3_flag_set(tp, WOL_SPEED_100MB);
16104 }
16105
16106 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16107 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16108
16109 /* A few boards don't want the Ethernet@WireSpeed phy feature */
16110 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16111 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16112 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16113 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16114 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16115 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16116 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16117
16118 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16119 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16120 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16121 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16122 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16123
16124 if (tg3_flag(tp, 5705_PLUS) &&
16125 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16126 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16127 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16128 !tg3_flag(tp, 57765_PLUS)) {
16129 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16130 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16131 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16132 tg3_asic_rev(tp) == ASIC_REV_5761) {
16133 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16134 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16135 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16136 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16137 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16138 } else
16139 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16140 }
16141
16142 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16143 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16144 tp->phy_otp = tg3_read_otp_phycfg(tp);
16145 if (tp->phy_otp == 0)
16146 tp->phy_otp = TG3_OTP_DEFAULT;
16147 }
16148
16149 if (tg3_flag(tp, CPMU_PRESENT))
16150 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16151 else
16152 tp->mi_mode = MAC_MI_MODE_BASE;
16153
16154 tp->coalesce_mode = 0;
16155 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16156 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16157 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16158
16159 /* Set these bits to enable statistics workaround. */
16160 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16161 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16162 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16163 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16164 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16165 }
16166
16167 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16168 tg3_asic_rev(tp) == ASIC_REV_57780)
16169 tg3_flag_set(tp, USE_PHYLIB);
16170
16171 err = tg3_mdio_init(tp);
16172 if (err)
16173 return err;
16174
16175 /* Initialize data/descriptor byte/word swapping. */
16176 val = tr32(GRC_MODE);
16177 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16178 tg3_asic_rev(tp) == ASIC_REV_5762)
16179 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16180 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16181 GRC_MODE_B2HRX_ENABLE |
16182 GRC_MODE_HTX2B_ENABLE |
16183 GRC_MODE_HOST_STACKUP);
16184 else
16185 val &= GRC_MODE_HOST_STACKUP;
16186
16187 tw32(GRC_MODE, val | tp->grc_mode);
16188
16189 tg3_switch_clocks(tp);
16190
16191 /* Clear this out for sanity. */
16192 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16193
16194 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16195 &pci_state_reg);
16196 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16197 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16198 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16199 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16200 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16201 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16202 void __iomem *sram_base;
16203
16204 /* Write some dummy words into the SRAM status block
16205 * area and see if it reads back correctly. If the return
16206 * value is bad, force-enable the PCIX workaround.
16207 */
16208 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16209
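/* The 0xffffffff write to the adjacent word must leave the first
 * word untouched; if it does not read back as zero, target writes
 * are being corrupted.
 */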
16210 writel(0x00000000, sram_base);
16211 writel(0x00000000, sram_base + 4);
16212 writel(0xffffffff, sram_base + 4);
16213 if (readl(sram_base) != 0x00000000)
16214 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16215 }
16216 }
16217
16218 udelay(50);
16219 tg3_nvram_init(tp);
16220
16221 /* If the device has an NVRAM, no need to load patch firmware */
16222 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16223 !tg3_flag(tp, NO_NVRAM))
16224 tp->fw_needed = NULL;
16225
16226 grc_misc_cfg = tr32(GRC_MISC_CFG);
16227 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16228
16229 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16230 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16231 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16232 tg3_flag_set(tp, IS_5788);
16233
16234 if (!tg3_flag(tp, IS_5788) &&
16235 tg3_asic_rev(tp) != ASIC_REV_5700)
16236 tg3_flag_set(tp, TAGGED_STATUS);
16237 if (tg3_flag(tp, TAGGED_STATUS)) {
16238 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16239 HOSTCC_MODE_CLRTICK_TXBD);
16240
16241 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16242 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16243 tp->misc_host_ctrl);
16244 }
16245
16246 /* Preserve the APE MAC_MODE bits */
16247 if (tg3_flag(tp, ENABLE_APE))
16248 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16249 else
16250 tp->mac_mode = 0;
16251
16252 if (tg3_10_100_only_device(tp, ent))
16253 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16254
16255 err = tg3_phy_probe(tp);
16256 if (err) {
16257 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16258 /* ... but do not return immediately ... */
16259 tg3_mdio_fini(tp);
16260 }
16261
16262 tg3_read_vpd(tp);
16263 tg3_read_fw_ver(tp);
16264
16265 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16266 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16267 } else {
16268 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16269 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16270 else
16271 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16272 }
16273
16274 /* 5700 {AX,BX} chips have a broken status block link
16275 * change bit implementation, so we must use the
16276 * status register in those cases.
16277 */
16278 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16279 tg3_flag_set(tp, USE_LINKCHG_REG);
16280 else
16281 tg3_flag_clear(tp, USE_LINKCHG_REG);
16282
16283 /* The led_ctrl is set during tg3_phy_probe; here we might
16284 * have to force the link status polling mechanism based
16285 * upon subsystem IDs.
16286 */
16287 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16288 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16289 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16290 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16291 tg3_flag_set(tp, USE_LINKCHG_REG);
16292 }
16293
16294 /* For all SERDES we poll the MAC status register. */
16295 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16296 tg3_flag_set(tp, POLL_SERDES);
16297 else
16298 tg3_flag_clear(tp, POLL_SERDES);
16299
16300 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16301 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16302 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16303 tg3_flag(tp, PCIX_MODE)) {
16304 tp->rx_offset = NET_SKB_PAD;
16305 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16306 tp->rx_copy_thresh = ~(u16)0;
16307 #endif
16308 }
16309
16310 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16311 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16312 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16313
16314 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16315
16316 /* Increment the rx prod index on the rx std ring by at most
16317 * 8 for these chips to workaround hw errata.
16318 */
16319 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16320 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16321 tg3_asic_rev(tp) == ASIC_REV_5755)
16322 tp->rx_std_max_post = 8;
16323
16324 if (tg3_flag(tp, ASPM_WORKAROUND))
16325 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16326 PCIE_PWR_MGMT_L1_THRESH_MSK;
16327
16328 return err;
16329 }
16330
16331 #ifdef CONFIG_SPARC
16332 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16333 {
16334 struct net_device *dev = tp->dev;
16335 struct pci_dev *pdev = tp->pdev;
16336 struct device_node *dp = pci_device_to_OF_node(pdev);
16337 const unsigned char *addr;
16338 int len;
16339
16340 addr = of_get_property(dp, "local-mac-address", &len);
16341 if (addr && len == 6) {
16342 memcpy(dev->dev_addr, addr, 6);
16343 return 0;
16344 }
16345 return -ENODEV;
16346 }
16347
16348 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16349 {
16350 struct net_device *dev = tp->dev;
16351
16352 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
16353 return 0;
16354 }
16355 #endif
16356
16357 static int tg3_get_device_address(struct tg3 *tp)
16358 {
16359 struct net_device *dev = tp->dev;
16360 u32 hi, lo, mac_offset;
16361 int addr_ok = 0;
16362 int err;
16363
16364 #ifdef CONFIG_SPARC
16365 if (!tg3_get_macaddr_sparc(tp))
16366 return 0;
16367 #endif
16368
16369 if (tg3_flag(tp, IS_SSB_CORE)) {
16370 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16371 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16372 return 0;
16373 }
16374
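/* 0x7c is the usual NVRAM offset of the MAC address. Dual-MAC
 * (5704/5780-class) and odd-numbered 5717+ functions keep a copy
 * at 0xcc, and 5717+ functions 2 and 3 sit a further 0x18c in.
 */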
16375 mac_offset = 0x7c;
16376 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16377 tg3_flag(tp, 5780_CLASS)) {
16378 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16379 mac_offset = 0xcc;
16380 if (tg3_nvram_lock(tp))
16381 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16382 else
16383 tg3_nvram_unlock(tp);
16384 } else if (tg3_flag(tp, 5717_PLUS)) {
16385 if (tp->pci_fn & 1)
16386 mac_offset = 0xcc;
16387 if (tp->pci_fn > 1)
16388 mac_offset += 0x18c;
16389 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16390 mac_offset = 0x10;
16391
16392 /* First try to get it from MAC address mailbox. */
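/* Bootcode stores 0x484b (ASCII "HK") in the top half of the
 * high word, evidently as a signature that a valid address follows.
 */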
16393 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
16394 if ((hi >> 16) == 0x484b) {
16395 dev->dev_addr[0] = (hi >> 8) & 0xff;
16396 dev->dev_addr[1] = (hi >> 0) & 0xff;
16397
16398 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16399 dev->dev_addr[2] = (lo >> 24) & 0xff;
16400 dev->dev_addr[3] = (lo >> 16) & 0xff;
16401 dev->dev_addr[4] = (lo >> 8) & 0xff;
16402 dev->dev_addr[5] = (lo >> 0) & 0xff;
16403
16404 /* Some old bootcode may report a 0 MAC address in SRAM */
16405 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16406 }
16407 if (!addr_ok) {
16408 /* Next, try NVRAM. */
16409 if (!tg3_flag(tp, NO_NVRAM) &&
16410 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16411 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16412 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16413 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16414 }
16415 /* Finally just fetch it out of the MAC control regs. */
16416 else {
16417 hi = tr32(MAC_ADDR_0_HIGH);
16418 lo = tr32(MAC_ADDR_0_LOW);
16419
16420 dev->dev_addr[5] = lo & 0xff;
16421 dev->dev_addr[4] = (lo >> 8) & 0xff;
16422 dev->dev_addr[3] = (lo >> 16) & 0xff;
16423 dev->dev_addr[2] = (lo >> 24) & 0xff;
16424 dev->dev_addr[1] = hi & 0xff;
16425 dev->dev_addr[0] = (hi >> 8) & 0xff;
16426 }
16427 }
16428
16429 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
16430 #ifdef CONFIG_SPARC
16431 if (!tg3_get_default_macaddr_sparc(tp))
16432 return 0;
16433 #endif
16434 return -EINVAL;
16435 }
16436 return 0;
16437 }
16438
16439 #define BOUNDARY_SINGLE_CACHELINE 1
16440 #define BOUNDARY_MULTI_CACHELINE 2
16441
16442 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16443 {
16444 int cacheline_size;
16445 u8 byte;
16446 int goal;
16447
16448 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16449 if (byte == 0)
16450 cacheline_size = 1024;
16451 else
16452 cacheline_size = (int) byte * 4;
16453
16454 /* On 5703 and later chips, the boundary bits have no
16455 * effect.
16456 */
16457 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16458 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16459 !tg3_flag(tp, PCI_EXPRESS))
16460 goto out;
16461
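/* Pick a boundary goal per host architecture: some RISC hosts
 * prefer bursts split at every cache line, others at multiples
 * of it; a goal of zero requests no boundary restriction.
 */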
16462 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16463 goal = BOUNDARY_MULTI_CACHELINE;
16464 #else
16465 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16466 goal = BOUNDARY_SINGLE_CACHELINE;
16467 #else
16468 goal = 0;
16469 #endif
16470 #endif
16471
16472 if (tg3_flag(tp, 57765_PLUS)) {
16473 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16474 goto out;
16475 }
16476
16477 if (!goal)
16478 goto out;
16479
16480 /* PCI controllers on most RISC systems tend to disconnect
16481 * when a device tries to burst across a cache-line boundary.
16482 * Therefore, letting tg3 do so just wastes PCI bandwidth.
16483 *
16484 * Unfortunately, for PCI-E there are only limited
16485 * write-side controls for this, and thus for reads
16486 * we will still get the disconnects. We'll also waste
16487 * these PCI cycles for both read and write for chips
16488 * other than 5700 and 5701 which do not implement the
16489 * boundary bits.
16490 */
16491 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16492 switch (cacheline_size) {
16493 case 16:
16494 case 32:
16495 case 64:
16496 case 128:
16497 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16498 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16499 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16500 } else {
16501 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16502 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16503 }
16504 break;
16505
16506 case 256:
16507 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16508 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16509 break;
16510
16511 default:
16512 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16513 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16514 break;
16515 }
16516 } else if (tg3_flag(tp, PCI_EXPRESS)) {
16517 switch (cacheline_size) {
16518 case 16:
16519 case 32:
16520 case 64:
16521 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16522 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16523 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16524 break;
16525 }
16526 /* fallthrough */
16527 case 128:
16528 default:
16529 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16530 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16531 break;
16532 }
16533 } else {
16534 switch (cacheline_size) {
16535 case 16:
16536 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16537 val |= (DMA_RWCTRL_READ_BNDRY_16 |
16538 DMA_RWCTRL_WRITE_BNDRY_16);
16539 break;
16540 }
16541 /* fallthrough */
16542 case 32:
16543 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16544 val |= (DMA_RWCTRL_READ_BNDRY_32 |
16545 DMA_RWCTRL_WRITE_BNDRY_32);
16546 break;
16547 }
16548 /* fallthrough */
16549 case 64:
16550 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16551 val |= (DMA_RWCTRL_READ_BNDRY_64 |
16552 DMA_RWCTRL_WRITE_BNDRY_64);
16553 break;
16554 }
16555 /* fallthrough */
16556 case 128:
16557 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16558 val |= (DMA_RWCTRL_READ_BNDRY_128 |
16559 DMA_RWCTRL_WRITE_BNDRY_128);
16560 break;
16561 }
16562 /* fallthrough */
16563 case 256:
16564 val |= (DMA_RWCTRL_READ_BNDRY_256 |
16565 DMA_RWCTRL_WRITE_BNDRY_256);
16566 break;
16567 case 512:
16568 val |= (DMA_RWCTRL_READ_BNDRY_512 |
16569 DMA_RWCTRL_WRITE_BNDRY_512);
16570 break;
16571 case 1024:
16572 default:
16573 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16574 DMA_RWCTRL_WRITE_BNDRY_1024);
16575 break;
16576 }
16577 }
16578
16579 out:
16580 return val;
16581 }
16582
16583 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16584 int size, bool to_device)
16585 {
16586 struct tg3_internal_buffer_desc test_desc;
16587 u32 sram_dma_descs;
16588 int i, ret;
16589
16590 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
16591
16592 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16593 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16594 tw32(RDMAC_STATUS, 0);
16595 tw32(WDMAC_STATUS, 0);
16596
16597 tw32(BUFMGR_MODE, 0);
16598 tw32(FTQ_RESET, 0);
16599
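/* Build a single internal buffer descriptor for the transfer.
 * 0x2100 is the NIC-local SRAM buffer address; the #if 0 verify
 * code further down reads the data back from the same offset.
 */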
16600 test_desc.addr_hi = ((u64) buf_dma) >> 32;
16601 test_desc.addr_lo = buf_dma & 0xffffffff;
16602 test_desc.nic_mbuf = 0x00002100;
16603 test_desc.len = size;
16604
16605 /*
16606 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
16607 * the *second* time the tg3 driver was getting loaded after an
16608 * initial scan.
16609 *
16610 * Broadcom tells me:
16611 * ...the DMA engine is connected to the GRC block and a DMA
16612 * reset may affect the GRC block in some unpredictable way...
16613 * The behavior of resets to individual blocks has not been tested.
16614 *
16615 * Broadcom noted the GRC reset will also reset all sub-components.
16616 */
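/* Direction selects the DMA engine: host-to-device runs through
 * the read-DMA engine, device-to-host through the write-DMA
 * engine, each with its own completion/send queue IDs.
 */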
16617 if (to_device) {
16618 test_desc.cqid_sqid = (13 << 8) | 2;
16619
16620 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
16621 udelay(40);
16622 } else {
16623 test_desc.cqid_sqid = (16 << 8) | 7;
16624
16625 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
16626 udelay(40);
16627 }
16628 test_desc.flags = 0x00000005;
16629
16630 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
16631 u32 val;
16632
16633 val = *(((u32 *)&test_desc) + i);
16634 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
16635 sram_dma_descs + (i * sizeof(u32)));
16636 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
16637 }
16638 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
16639
16640 if (to_device)
16641 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
16642 else
16643 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
16644
16645 ret = -ENODEV;
16646 for (i = 0; i < 40; i++) {
16647 u32 val;
16648
16649 if (to_device)
16650 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
16651 else
16652 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
16653 if ((val & 0xffff) == sram_dma_descs) {
16654 ret = 0;
16655 break;
16656 }
16657
16658 udelay(100);
16659 }
16660
16661 return ret;
16662 }
16663
16664 #define TEST_BUFFER_SIZE 0x2000
16665
16666 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16667 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16668 { },
16669 };
16670
16671 static int tg3_test_dma(struct tg3 *tp)
16672 {
16673 dma_addr_t buf_dma;
16674 u32 *buf, saved_dma_rwctrl;
16675 int ret = 0;
16676
16677 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16678 &buf_dma, GFP_KERNEL);
16679 if (!buf) {
16680 ret = -ENOMEM;
16681 goto out_nofree;
16682 }
16683
16684 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16685 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
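/* Seed DMA_RW_CTRL with the default PCI write/read command codes;
 * tg3_calc_dma_bndry() then folds in any boundary bits.
 */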
16686
16687 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
16688
16689 if (tg3_flag(tp, 57765_PLUS))
16690 goto out;
16691
16692 if (tg3_flag(tp, PCI_EXPRESS)) {
16693 /* DMA read watermark not used on PCIE */
16694 tp->dma_rwctrl |= 0x00180000;
16695 } else if (!tg3_flag(tp, PCIX_MODE)) {
16696 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16697 tg3_asic_rev(tp) == ASIC_REV_5750)
16698 tp->dma_rwctrl |= 0x003f0000;
16699 else
16700 tp->dma_rwctrl |= 0x003f000f;
16701 } else {
16702 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16703 tg3_asic_rev(tp) == ASIC_REV_5704) {
16704 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16705 u32 read_water = 0x7;
16706
16707 /* If the 5704 is behind the EPB bridge, we can
16708 * do the less restrictive ONE_DMA workaround for
16709 * better performance.
16710 */
16711 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16712 tg3_asic_rev(tp) == ASIC_REV_5704)
16713 tp->dma_rwctrl |= 0x8000;
16714 else if (ccval == 0x6 || ccval == 0x7)
16715 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16716
16717 if (tg3_asic_rev(tp) == ASIC_REV_5703)
16718 read_water = 4;
16719 /* Set bit 23 to enable PCIX hw bug fix */
16720 tp->dma_rwctrl |=
16721 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
16722 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
16723 (1 << 23);
16724 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
16725 /* 5780 always in PCIX mode */
16726 tp->dma_rwctrl |= 0x00144000;
16727 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
16728 /* 5714 always in PCIX mode */
16729 tp->dma_rwctrl |= 0x00148000;
16730 } else {
16731 tp->dma_rwctrl |= 0x001b000f;
16732 }
16733 }
16734 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
16735 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16736
16737 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16738 tg3_asic_rev(tp) == ASIC_REV_5704)
16739 tp->dma_rwctrl &= 0xfffffff0;
16740
16741 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16742 tg3_asic_rev(tp) == ASIC_REV_5701) {
16743 /* Remove this if it causes problems for some boards. */
16744 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
16745
16746 /* On 5700/5701 chips, we need to set this bit.
16747 * Otherwise the chip will issue cacheline transactions
16748 * to streamable DMA memory without all the byte
16749 * enables turned on. This is an error on several
16750 * RISC PCI controllers, in particular sparc64.
16751 *
16752 * On 5703/5704 chips, this bit has been reassigned
16753 * a different meaning. In particular, it is used
16754 * on those chips to enable a PCI-X workaround.
16755 */
16756 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
16757 }
16758
16759 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16760
16761 #if 0
16762 /* Unneeded, already done by tg3_get_invariants. */
16763 tg3_switch_clocks(tp);
16764 #endif
16765
16766 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16767 tg3_asic_rev(tp) != ASIC_REV_5701)
16768 goto out;
16769
16770 /* It is best to perform the DMA test with the maximum write burst size
16771 * to expose the 5700/5701 write DMA bug.
16772 */
16773 saved_dma_rwctrl = tp->dma_rwctrl;
16774 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16775 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16776
16777 while (1) {
16778 u32 *p = buf, i;
16779
16780 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
16781 p[i] = i;
16782
16783 /* Send the buffer to the chip. */
16784 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
16785 if (ret) {
16786 dev_err(&tp->pdev->dev,
16787 "%s: Buffer write failed. err = %d\n",
16788 __func__, ret);
16789 break;
16790 }
16791
16792 #if 0
16793 /* validate data reached card RAM correctly. */
16794 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16795 u32 val;
16796 tg3_read_mem(tp, 0x2100 + (i*4), &val);
16797 if (le32_to_cpu(val) != p[i]) {
16798 dev_err(&tp->pdev->dev,
16799 "%s: Buffer corrupted on device! "
16800 "(%d != %d)\n", __func__, val, i);
16801 /* ret = -ENODEV here? */
16802 }
16803 p[i] = 0;
16804 }
16805 #endif
16806 /* Now read it back. */
16807 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
16808 if (ret) {
16809 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
16810 "err = %d\n", __func__, ret);
16811 break;
16812 }
16813
16814 /* Verify it. */
16815 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16816 if (p[i] == i)
16817 continue;
16818
16819 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16820 DMA_RWCTRL_WRITE_BNDRY_16) {
16821 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16822 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16823 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16824 break;
16825 } else {
16826 dev_err(&tp->pdev->dev,
16827 "%s: Buffer corrupted on read back! "
16828 "(%d != %d)\n", __func__, p[i], i);
16829 ret = -ENODEV;
16830 goto out;
16831 }
16832 }
16833
16834 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
16835 /* Success. */
16836 ret = 0;
16837 break;
16838 }
16839 }
16840 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16841 DMA_RWCTRL_WRITE_BNDRY_16) {
16842 /* DMA test passed without adjusting DMA boundary,
16843 * now look for chipsets that are known to expose the
16844 * DMA bug without failing the test.
16845 */
16846 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
16847 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16848 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16849 } else {
16850 /* Safe to use the calculated DMA boundary. */
16851 tp->dma_rwctrl = saved_dma_rwctrl;
16852 }
16853
16854 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16855 }
16856
16857 out:
16858 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
16859 out_nofree:
16860 return ret;
16861 }
16862
16863 static void tg3_init_bufmgr_config(struct tg3 *tp)
16864 {
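/* Buffer manager watermarks vary by chip generation: 57765+,
 * 5705+ (with 5906 tweaks), and the original parts each get their
 * own standard and jumbo defaults.
 */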
16865 if (tg3_flag(tp, 57765_PLUS)) {
16866 tp->bufmgr_config.mbuf_read_dma_low_water =
16867 DEFAULT_MB_RDMA_LOW_WATER_5705;
16868 tp->bufmgr_config.mbuf_mac_rx_low_water =
16869 DEFAULT_MB_MACRX_LOW_WATER_57765;
16870 tp->bufmgr_config.mbuf_high_water =
16871 DEFAULT_MB_HIGH_WATER_57765;
16872
16873 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16874 DEFAULT_MB_RDMA_LOW_WATER_5705;
16875 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16876 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
16877 tp->bufmgr_config.mbuf_high_water_jumbo =
16878 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
16879 } else if (tg3_flag(tp, 5705_PLUS)) {
16880 tp->bufmgr_config.mbuf_read_dma_low_water =
16881 DEFAULT_MB_RDMA_LOW_WATER_5705;
16882 tp->bufmgr_config.mbuf_mac_rx_low_water =
16883 DEFAULT_MB_MACRX_LOW_WATER_5705;
16884 tp->bufmgr_config.mbuf_high_water =
16885 DEFAULT_MB_HIGH_WATER_5705;
16886 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16887 tp->bufmgr_config.mbuf_mac_rx_low_water =
16888 DEFAULT_MB_MACRX_LOW_WATER_5906;
16889 tp->bufmgr_config.mbuf_high_water =
16890 DEFAULT_MB_HIGH_WATER_5906;
16891 }
16892
16893 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16894 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
16895 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16896 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
16897 tp->bufmgr_config.mbuf_high_water_jumbo =
16898 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
16899 } else {
16900 tp->bufmgr_config.mbuf_read_dma_low_water =
16901 DEFAULT_MB_RDMA_LOW_WATER;
16902 tp->bufmgr_config.mbuf_mac_rx_low_water =
16903 DEFAULT_MB_MACRX_LOW_WATER;
16904 tp->bufmgr_config.mbuf_high_water =
16905 DEFAULT_MB_HIGH_WATER;
16906
16907 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16908 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
16909 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16910 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
16911 tp->bufmgr_config.mbuf_high_water_jumbo =
16912 DEFAULT_MB_HIGH_WATER_JUMBO;
16913 }
16914
16915 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
16916 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
16917 }
16918
16919 static char *tg3_phy_string(struct tg3 *tp)
16920 {
16921 switch (tp->phy_id & TG3_PHY_ID_MASK) {
16922 case TG3_PHY_ID_BCM5400: return "5400";
16923 case TG3_PHY_ID_BCM5401: return "5401";
16924 case TG3_PHY_ID_BCM5411: return "5411";
16925 case TG3_PHY_ID_BCM5701: return "5701";
16926 case TG3_PHY_ID_BCM5703: return "5703";
16927 case TG3_PHY_ID_BCM5704: return "5704";
16928 case TG3_PHY_ID_BCM5705: return "5705";
16929 case TG3_PHY_ID_BCM5750: return "5750";
16930 case TG3_PHY_ID_BCM5752: return "5752";
16931 case TG3_PHY_ID_BCM5714: return "5714";
16932 case TG3_PHY_ID_BCM5780: return "5780";
16933 case TG3_PHY_ID_BCM5755: return "5755";
16934 case TG3_PHY_ID_BCM5787: return "5787";
16935 case TG3_PHY_ID_BCM5784: return "5784";
16936 case TG3_PHY_ID_BCM5756: return "5722/5756";
16937 case TG3_PHY_ID_BCM5906: return "5906";
16938 case TG3_PHY_ID_BCM5761: return "5761";
16939 case TG3_PHY_ID_BCM5718C: return "5718C";
16940 case TG3_PHY_ID_BCM5718S: return "5718S";
16941 case TG3_PHY_ID_BCM57765: return "57765";
16942 case TG3_PHY_ID_BCM5719C: return "5719C";
16943 case TG3_PHY_ID_BCM5720C: return "5720C";
16944 case TG3_PHY_ID_BCM5762: return "5762C";
16945 case TG3_PHY_ID_BCM8002: return "8002/serdes";
16946 case 0: return "serdes";
16947 default: return "unknown";
16948 }
16949 }
16950
16951 static char *tg3_bus_string(struct tg3 *tp, char *str)
16952 {
16953 if (tg3_flag(tp, PCI_EXPRESS)) {
16954 strcpy(str, "PCI Express");
16955 return str;
16956 } else if (tg3_flag(tp, PCIX_MODE)) {
16957 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
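/* The low bits of CLOCK_CTRL encode the sampled PCI-X bus speed;
 * the common encodings (133/100/66/50/33 MHz) are decoded below.
 */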
16958
16959 strcpy(str, "PCIX:");
16960
16961 if ((clock_ctrl == 7) ||
16962 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
16963 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
16964 strcat(str, "133MHz");
16965 else if (clock_ctrl == 0)
16966 strcat(str, "33MHz");
16967 else if (clock_ctrl == 2)
16968 strcat(str, "50MHz");
16969 else if (clock_ctrl == 4)
16970 strcat(str, "66MHz");
16971 else if (clock_ctrl == 6)
16972 strcat(str, "100MHz");
16973 } else {
16974 strcpy(str, "PCI:");
16975 if (tg3_flag(tp, PCI_HIGH_SPEED))
16976 strcat(str, "66MHz");
16977 else
16978 strcat(str, "33MHz");
16979 }
16980 if (tg3_flag(tp, PCI_32BIT))
16981 strcat(str, ":32-bit");
16982 else
16983 strcat(str, ":64-bit");
16984 return str;
16985 }
16986
16987 static void tg3_init_coal(struct tg3 *tp)
16988 {
16989 struct ethtool_coalesce *ec = &tp->coal;
16990
16991 memset(ec, 0, sizeof(*ec));
16992 ec->cmd = ETHTOOL_GCOALESCE;
16993 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
16994 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
16995 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
16996 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
16997 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
16998 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
16999 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17000 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17001 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17002
17003 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17004 HOSTCC_MODE_CLRTICK_TXBD)) {
17005 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17006 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17007 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17008 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17009 }
17010
17011 if (tg3_flag(tp, 5705_PLUS)) {
17012 ec->rx_coalesce_usecs_irq = 0;
17013 ec->tx_coalesce_usecs_irq = 0;
17014 ec->stats_block_coalesce_usecs = 0;
17015 }
17016 }
17017
17018 static int tg3_init_one(struct pci_dev *pdev,
17019 const struct pci_device_id *ent)
17020 {
17021 struct net_device *dev;
17022 struct tg3 *tp;
17023 int i, err, pm_cap;
17024 u32 sndmbx, rcvmbx, intmbx;
17025 char str[40];
17026 u64 dma_mask, persist_dma_mask;
17027 netdev_features_t features = 0;
17028
17029 printk_once(KERN_INFO "%s\n", version);
17030
17031 err = pci_enable_device(pdev);
17032 if (err) {
17033 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17034 return err;
17035 }
17036
17037 err = pci_request_regions(pdev, DRV_MODULE_NAME);
17038 if (err) {
17039 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17040 goto err_out_disable_pdev;
17041 }
17042
17043 pci_set_master(pdev);
17044
17045 /* Find power-management capability. */
17046 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
17047 if (pm_cap == 0) {
17048 dev_err(&pdev->dev,
17049 "Cannot find Power Management capability, aborting\n");
17050 err = -EIO;
17051 goto err_out_free_res;
17052 }
17053
17054 err = pci_set_power_state(pdev, PCI_D0);
17055 if (err) {
17056 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
17057 goto err_out_free_res;
17058 }
17059
17060 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17061 if (!dev) {
17062 err = -ENOMEM;
17063 goto err_out_power_down;
17064 }
17065
17066 SET_NETDEV_DEV(dev, &pdev->dev);
17067
17068 tp = netdev_priv(dev);
17069 tp->pdev = pdev;
17070 tp->dev = dev;
17071 tp->pm_cap = pm_cap;
17072 tp->rx_mode = TG3_DEF_RX_MODE;
17073 tp->tx_mode = TG3_DEF_TX_MODE;
17074 tp->irq_sync = 1;
17075
17076 if (tg3_debug > 0)
17077 tp->msg_enable = tg3_debug;
17078 else
17079 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17080
17081 if (pdev_is_ssb_gige_core(pdev)) {
17082 tg3_flag_set(tp, IS_SSB_CORE);
17083 if (ssb_gige_must_flush_posted_writes(pdev))
17084 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17085 if (ssb_gige_one_dma_at_once(pdev))
17086 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17087 if (ssb_gige_have_roboswitch(pdev))
17088 tg3_flag_set(tp, ROBOSWITCH);
17089 if (ssb_gige_is_rgmii(pdev))
17090 tg3_flag_set(tp, RGMII_MODE);
17091 }
17092
17093 /* The word/byte swap controls here control register access byte
17094 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17095 * setting below.
17096 */
17097 tp->misc_host_ctrl =
17098 MISC_HOST_CTRL_MASK_PCI_INT |
17099 MISC_HOST_CTRL_WORD_SWAP |
17100 MISC_HOST_CTRL_INDIR_ACCESS |
17101 MISC_HOST_CTRL_PCISTATE_RW;
17102
17103 /* The NONFRM (non-frame) byte/word swap controls take effect
17104 * on descriptor entries, i.e., anything which isn't packet data.
17105 *
17106 * The StrongARM chips on the board (one for tx, one for rx)
17107 * are running in big-endian mode.
17108 */
17109 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17110 GRC_MODE_WSWAP_NONFRM_DATA);
17111 #ifdef __BIG_ENDIAN
17112 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17113 #endif
17114 spin_lock_init(&tp->lock);
17115 spin_lock_init(&tp->indirect_lock);
17116 INIT_WORK(&tp->reset_task, tg3_reset_task);
17117
17118 tp->regs = pci_ioremap_bar(pdev, BAR_0);
17119 if (!tp->regs) {
17120 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17121 err = -ENOMEM;
17122 goto err_out_free_dev;
17123 }
17124
17125 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17126 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17127 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17128 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17129 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17130 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17131 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17132 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17133 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17134 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17135 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17136 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
17137 tg3_flag_set(tp, ENABLE_APE);
17138 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17139 if (!tp->aperegs) {
17140 dev_err(&pdev->dev,
17141 "Cannot map APE registers, aborting\n");
17142 err = -ENOMEM;
17143 goto err_out_iounmap;
17144 }
17145 }
17146
17147 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17148 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17149
17150 dev->ethtool_ops = &tg3_ethtool_ops;
17151 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17152 dev->netdev_ops = &tg3_netdev_ops;
17153 dev->irq = pdev->irq;
17154
17155 err = tg3_get_invariants(tp, ent);
17156 if (err) {
17157 dev_err(&pdev->dev,
17158 "Problem fetching invariants of chip, aborting\n");
17159 goto err_out_apeunmap;
17160 }
17161
17162 /* The EPB bridge inside 5714, 5715, and 5780 and any
17163 * device behind the EPB cannot support DMA addresses > 40-bit.
17164 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17165 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17166 * do DMA address check in tg3_start_xmit().
17167 */
17168 if (tg3_flag(tp, IS_5788))
17169 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17170 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17171 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17172 #ifdef CONFIG_HIGHMEM
17173 dma_mask = DMA_BIT_MASK(64);
17174 #endif
17175 } else
17176 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17177
17178 /* Configure DMA attributes. */
17179 if (dma_mask > DMA_BIT_MASK(32)) {
17180 err = pci_set_dma_mask(pdev, dma_mask);
17181 if (!err) {
17182 features |= NETIF_F_HIGHDMA;
17183 err = pci_set_consistent_dma_mask(pdev,
17184 persist_dma_mask);
17185 if (err < 0) {
17186 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17187 "DMA for consistent allocations\n");
17188 goto err_out_apeunmap;
17189 }
17190 }
17191 }
17192 if (err || dma_mask == DMA_BIT_MASK(32)) {
17193 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17194 if (err) {
17195 dev_err(&pdev->dev,
17196 "No usable DMA configuration, aborting\n");
17197 goto err_out_apeunmap;
17198 }
17199 }
17200
17201 tg3_init_bufmgr_config(tp);
17202
17203 features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
17204
17205 /* 5700 B0 chips do not support checksumming correctly due
17206 * to hardware bugs.
17207 */
17208 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17209 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17210
17211 if (tg3_flag(tp, 5755_PLUS))
17212 features |= NETIF_F_IPV6_CSUM;
17213 }
17214
17215 /* TSO is on by default on chips that support hardware TSO.
17216 * Firmware TSO on older chips gives lower performance, so it
17217 * is off by default, but can be enabled using ethtool.
17218 */
17219 if ((tg3_flag(tp, HW_TSO_1) ||
17220 tg3_flag(tp, HW_TSO_2) ||
17221 tg3_flag(tp, HW_TSO_3)) &&
17222 (features & NETIF_F_IP_CSUM))
17223 features |= NETIF_F_TSO;
17224 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17225 if (features & NETIF_F_IPV6_CSUM)
17226 features |= NETIF_F_TSO6;
17227 if (tg3_flag(tp, HW_TSO_3) ||
17228 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17229 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17230 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17231 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17232 tg3_asic_rev(tp) == ASIC_REV_57780)
17233 features |= NETIF_F_TSO_ECN;
17234 }
17235
17236 dev->features |= features;
17237 dev->vlan_features |= features;
17238
17239 /*
17240 * Add loopback capability only for a subset of devices that support
17241 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17242 * loopback for the remaining devices.
17243 */
17244 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17245 !tg3_flag(tp, CPMU_PRESENT))
17246 /* Add the loopback capability */
17247 features |= NETIF_F_LOOPBACK;
17248
17249 dev->hw_features |= features;
17250
17251 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17252 !tg3_flag(tp, TSO_CAPABLE) &&
17253 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17254 tg3_flag_set(tp, MAX_RXPEND_64);
17255 tp->rx_pending = 63;
17256 }
17257
17258 err = tg3_get_device_address(tp);
17259 if (err) {
17260 dev_err(&pdev->dev,
17261 "Could not obtain valid ethernet address, aborting\n");
17262 goto err_out_apeunmap;
17263 }
17264
17265 /*
17266 * Reset chip in case the UNDI or EFI driver did not shut down
17267 * DMA. The DMA self test will enable WDMAC and we'll see (spurious)
17268 * pending DMA on the PCI bus at that point.
17269 */
17270 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17271 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17272 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17273 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17274 }
17275
17276 err = tg3_test_dma(tp);
17277 if (err) {
17278 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17279 goto err_out_apeunmap;
17280 }
17281
17282 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17283 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17284 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17285 for (i = 0; i < tp->irq_max; i++) {
17286 struct tg3_napi *tnapi = &tp->napi[i];
17287
17288 tnapi->tp = tp;
17289 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17290
17291 tnapi->int_mbox = intmbx;
17292 if (i <= 4)
17293 intmbx += 0x8;
17294 else
17295 intmbx += 0x4;
17296
17297 tnapi->consmbox = rcvmbx;
17298 tnapi->prodmbox = sndmbx;
17299
17300 if (i)
17301 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17302 else
17303 tnapi->coal_now = HOSTCC_MODE_NOW;
17304
17305 if (!tg3_flag(tp, SUPPORT_MSIX))
17306 break;
17307
17308 /*
17309 * If we support MSIX, we'll be using RSS. If we're using
17310 * RSS, the first vector only handles link interrupts and the
17311 * remaining vectors handle rx and tx interrupts. Reuse the
17312 * mailbox values for the next iteration. The values we set up
17313 * above are still useful for the single vectored mode.
17314 */
17315 if (!i)
17316 continue;
17317
17318 rcvmbx += 0x8;
17319
17320 if (sndmbx & 0x4)
17321 sndmbx -= 0x4;
17322 else
17323 sndmbx += 0xc;
17324 }
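/*
 * Worked example of the mailbox zig-zag above (offsets relative to
 * MAILBOX_SNDHOST_PROD_IDX_0, assuming TG3_64BIT_REG_LOW == 0x4):
 * vectors 1, 2, 3, 4, ... are handed prodmbox offsets 0x4, 0x0, 0xc,
 * 0x8, ... -- i.e. both 32-bit halves of each 64-bit send mailbox
 * register end up used as independent producer mailboxes.
 */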
17325
17326 tg3_init_coal(tp);
17327
17328 pci_set_drvdata(pdev, dev);
17329
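/* These ASICs carry the hardware clock that the PTP (IEEE 1588)
 * timestamping support relies on.
 */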
17330 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17331 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17332 tg3_asic_rev(tp) == ASIC_REV_5762)
17333 tg3_flag_set(tp, PTP_CAPABLE);
17334
17335 if (tg3_flag(tp, 5717_PLUS)) {
17336 /* Resume from a low-power mode */
17337 tg3_frob_aux_power(tp, false);
17338 }
17339
17340 tg3_timer_init(tp);
17341
17342 tg3_carrier_off(tp);
17343
17344 err = register_netdev(dev);
17345 if (err) {
17346 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17347 goto err_out_apeunmap;
17348 }
17349
17350 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17351 tp->board_part_number,
17352 tg3_chip_rev_id(tp),
17353 tg3_bus_string(tp, str),
17354 dev->dev_addr);
17355
17356 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17357 struct phy_device *phydev;
17358 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
17359 netdev_info(dev,
17360 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17361 phydev->drv->name, dev_name(&phydev->dev));
17362 } else {
17363 char *ethtype;
17364
17365 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17366 ethtype = "10/100Base-TX";
17367 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17368 ethtype = "1000Base-SX";
17369 else
17370 ethtype = "10/100/1000Base-T";
17371
17372 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17373 "(WireSpeed[%d], EEE[%d])\n",
17374 tg3_phy_string(tp), ethtype,
17375 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17376 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17377 }
17378
17379 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17380 (dev->features & NETIF_F_RXCSUM) != 0,
17381 tg3_flag(tp, USE_LINKCHG_REG) != 0,
17382 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17383 tg3_flag(tp, ENABLE_ASF) != 0,
17384 tg3_flag(tp, TSO_CAPABLE) != 0);
17385 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17386 tp->dma_rwctrl,
17387 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17388 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17389
17390 pci_save_state(pdev);
17391
17392 return 0;
17393
17394 err_out_apeunmap:
17395 if (tp->aperegs) {
17396 iounmap(tp->aperegs);
17397 tp->aperegs = NULL;
17398 }
17399
17400 err_out_iounmap:
17401 if (tp->regs) {
17402 iounmap(tp->regs);
17403 tp->regs = NULL;
17404 }
17405
17406 err_out_free_dev:
17407 free_netdev(dev);
17408
17409 err_out_power_down:
17410 pci_set_power_state(pdev, PCI_D3hot);
17411
17412 err_out_free_res:
17413 pci_release_regions(pdev);
17414
17415 err_out_disable_pdev:
17416 pci_disable_device(pdev);
17417 pci_set_drvdata(pdev, NULL);
17418 return err;
17419 }
17420
17421 static void tg3_remove_one(struct pci_dev *pdev)
17422 {
17423 struct net_device *dev = pci_get_drvdata(pdev);
17424
17425 if (dev) {
17426 struct tg3 *tp = netdev_priv(dev);
17427
17428 release_firmware(tp->fw);
17429
17430 tg3_reset_task_cancel(tp);
17431
17432 if (tg3_flag(tp, USE_PHYLIB)) {
17433 tg3_phy_fini(tp);
17434 tg3_mdio_fini(tp);
17435 }
17436
17437 unregister_netdev(dev);
17438 if (tp->aperegs) {
17439 iounmap(tp->aperegs);
17440 tp->aperegs = NULL;
17441 }
17442 if (tp->regs) {
17443 iounmap(tp->regs);
17444 tp->regs = NULL;
17445 }
17446 free_netdev(dev);
17447 pci_release_regions(pdev);
17448 pci_disable_device(pdev);
17449 pci_set_drvdata(pdev, NULL);
17450 }
17451 }
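/* Note that the teardown above mirrors tg3_init_one() in reverse: the
 * netdev is unregistered before its MMIO mappings go away, and the PCI
 * regions and device are released last, matching the err_out_* unwind
 * labels in the probe path.
 */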
17452
17453 #ifdef CONFIG_PM_SLEEP
17454 static int tg3_suspend(struct device *device)
17455 {
17456 struct pci_dev *pdev = to_pci_dev(device);
17457 struct net_device *dev = pci_get_drvdata(pdev);
17458 struct tg3 *tp = netdev_priv(dev);
17459 int err;
17460
17461 if (!netif_running(dev))
17462 return 0;
17463
17464 tg3_reset_task_cancel(tp);
17465 tg3_phy_stop(tp);
17466 tg3_netif_stop(tp);
17467
17468 tg3_timer_stop(tp);
17469
17470 tg3_full_lock(tp, 1);
17471 tg3_disable_ints(tp);
17472 tg3_full_unlock(tp);
17473
17474 netif_device_detach(dev);
17475
17476 tg3_full_lock(tp, 0);
17477 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17478 tg3_flag_clear(tp, INIT_COMPLETE);
17479 tg3_full_unlock(tp);
17480
17481 err = tg3_power_down_prepare(tp);
17482 if (err) {
17483 int err2;
17484
17485 tg3_full_lock(tp, 0);
17486
17487 tg3_flag_set(tp, INIT_COMPLETE);
17488 err2 = tg3_restart_hw(tp, true);
17489 if (err2)
17490 goto out;
17491
17492 tg3_timer_start(tp);
17493
17494 netif_device_attach(dev);
17495 tg3_netif_start(tp);
17496
17497 out:
17498 tg3_full_unlock(tp);
17499
17500 if (!err2)
17501 tg3_phy_start(tp);
17502 }
17503
17504 return err;
17505 }
17506
17507 static int tg3_resume(struct device *device)
17508 {
17509 struct pci_dev *pdev = to_pci_dev(device);
17510 struct net_device *dev = pci_get_drvdata(pdev);
17511 struct tg3 *tp = netdev_priv(dev);
17512 int err;
17513
17514 if (!netif_running(dev))
17515 return 0;
17516
17517 netif_device_attach(dev);
17518
17519 tg3_full_lock(tp, 0);
17520
17521 tg3_flag_set(tp, INIT_COMPLETE);
17522 err = tg3_restart_hw(tp,
17523 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
17524 if (err)
17525 goto out;
17526
17527 tg3_timer_start(tp);
17528
17529 tg3_netif_start(tp);
17530
17531 out:
17532 tg3_full_unlock(tp);
17533
17534 if (!err)
17535 tg3_phy_start(tp);
17536
17537 return err;
17538 }
17539 #endif /* CONFIG_PM_SLEEP */
17540
17541 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
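/*
 * Rough sketch of what the SIMPLE_DEV_PM_OPS() line above expands to
 * (see include/linux/pm.h for the authoritative definition):
 *
 *	static const struct dev_pm_ops tg3_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(tg3_suspend, tg3_resume)
 *	};
 *
 * i.e. tg3_suspend backs the suspend/freeze/poweroff system-sleep
 * callbacks and tg3_resume backs resume/thaw/restore.
 */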
17542
17543 /**
17544 * tg3_io_error_detected - called when PCI error is detected
17545 * @pdev: Pointer to PCI device
17546 * @state: The current PCI connection state
17547 *
17548 * This function is called after a PCI bus error affecting
17549 * this device has been detected.
17550 */
17551 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17552 pci_channel_state_t state)
17553 {
17554 struct net_device *netdev = pci_get_drvdata(pdev);
17555 struct tg3 *tp = netdev_priv(netdev);
17556 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17557
17558 netdev_info(netdev, "PCI I/O error detected\n");
17559
17560 rtnl_lock();
17561
17562 if (!netif_running(netdev))
17563 goto done;
17564
17565 tg3_phy_stop(tp);
17566
17567 tg3_netif_stop(tp);
17568
17569 tg3_timer_stop(tp);
17570
17571 /* Want to make sure that the reset task doesn't run */
17572 tg3_reset_task_cancel(tp);
17573
17574 netif_device_detach(netdev);
17575
17576 /* Clean up software state, even if MMIO is blocked */
17577 tg3_full_lock(tp, 0);
17578 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17579 tg3_full_unlock(tp);
17580
17581 done:
17582 if (state == pci_channel_io_perm_failure)
17583 err = PCI_ERS_RESULT_DISCONNECT;
17584 else
17585 pci_disable_device(pdev);
17586
17587 rtnl_unlock();
17588
17589 return err;
17590 }
17591
17592 /**
17593 * tg3_io_slot_reset - called after the PCI bus has been reset.
17594 * @pdev: Pointer to PCI device
17595 *
17596 * Restart the card from scratch, as if from a cold boot.
17597 * At this point, the card has experienced a hard reset,
17598 * followed by fixups by BIOS, and has its config space
17599 * set up identically to what it was at cold boot.
17600 */
17601 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17602 {
17603 struct net_device *netdev = pci_get_drvdata(pdev);
17604 struct tg3 *tp = netdev_priv(netdev);
17605 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17606 int err;
17607
17608 rtnl_lock();
17609
17610 if (pci_enable_device(pdev)) {
17611 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
17612 goto done;
17613 }
17614
17615 pci_set_master(pdev);
17616 pci_restore_state(pdev);
17617 pci_save_state(pdev);
17618
17619 if (!netif_running(netdev)) {
17620 rc = PCI_ERS_RESULT_RECOVERED;
17621 goto done;
17622 }
17623
17624 err = tg3_power_up(tp);
17625 if (err)
17626 goto done;
17627
17628 rc = PCI_ERS_RESULT_RECOVERED;
17629
17630 done:
17631 rtnl_unlock();
17632
17633 return rc;
17634 }
17635
17636 /**
17637 * tg3_io_resume - called when traffic can start flowing again.
17638 * @pdev: Pointer to PCI device
17639 *
17640 * This callback is called when the error recovery driver tells
17641 * us that it's OK to resume normal operation.
17642 */
17643 static void tg3_io_resume(struct pci_dev *pdev)
17644 {
17645 struct net_device *netdev = pci_get_drvdata(pdev);
17646 struct tg3 *tp = netdev_priv(netdev);
17647 int err;
17648
17649 rtnl_lock();
17650
17651 if (!netif_running(netdev))
17652 goto done;
17653
17654 tg3_full_lock(tp, 0);
17655 tg3_flag_set(tp, INIT_COMPLETE);
17656 err = tg3_restart_hw(tp, true);
17657 if (err) {
17658 tg3_full_unlock(tp);
17659 netdev_err(netdev, "Cannot restart hardware after reset.\n");
17660 goto done;
17661 }
17662
17663 netif_device_attach(netdev);
17664
17665 tg3_timer_start(tp);
17666
17667 tg3_netif_start(tp);
17668
17669 tg3_full_unlock(tp);
17670
17671 tg3_phy_start(tp);
17672
17673 done:
17674 rtnl_unlock();
17675 }
17676
17677 static const struct pci_error_handlers tg3_err_handler = {
17678 .error_detected = tg3_io_error_detected,
17679 .slot_reset = tg3_io_slot_reset,
17680 .resume = tg3_io_resume
17681 };
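/* The PCI AER core drives these callbacks in a fixed order during
 * recovery: error_detected() first (tg3 answers NEED_RESET or
 * DISCONNECT), then slot_reset() once the link has been reset, and
 * finally resume() when the core declares the slot recovered. See
 * Documentation/PCI/pci-error-recovery.txt for the full state machine.
 */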
17682
17683 static struct pci_driver tg3_driver = {
17684 .name = DRV_MODULE_NAME,
17685 .id_table = tg3_pci_tbl,
17686 .probe = tg3_init_one,
17687 .remove = tg3_remove_one,
17688 .err_handler = &tg3_err_handler,
17689 .driver.pm = &tg3_pm_ops,
17690 };
17691
17692 static int __init tg3_init(void)
17693 {
17694 return pci_register_driver(&tg3_driver);
17695 }
17696
17697 static void __exit tg3_cleanup(void)
17698 {
17699 pci_unregister_driver(&tg3_driver);
17700 }
17701
17702 module_init(tg3_init);
17703 module_exit(tg3_cleanup);
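/*
 * Since the init/exit pair above does nothing beyond registering and
 * unregistering the PCI driver, it could equivalently be written with
 * the module_pci_driver() helper (available since kernel 3.4):
 *
 *	module_pci_driver(tg3_driver);
 */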