1 /*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2013 Broadcom Corporation.
8 *
9 * Firmware is:
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
12 *
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
16 */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47 #include <linux/ssb/ssb_driver_gige.h>
48 #include <linux/hwmon.h>
49 #include <linux/hwmon-sysfs.h>
50
51 #include <net/checksum.h>
52 #include <net/ip.h>
53
54 #include <linux/io.h>
55 #include <asm/byteorder.h>
56 #include <linux/uaccess.h>
57
58 #include <uapi/linux/net_tstamp.h>
59 #include <linux/ptp_clock_kernel.h>
60
61 #ifdef CONFIG_SPARC
62 #include <asm/idprom.h>
63 #include <asm/prom.h>
64 #endif
65
66 #define BAR_0 0
67 #define BAR_2 2
68
69 #include "tg3.h"
70
71 /* Functions & macros to verify TG3_FLAGS types */
72
73 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
74 {
75 return test_bit(flag, bits);
76 }
77
78 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
79 {
80 set_bit(flag, bits);
81 }
82
83 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
84 {
85 clear_bit(flag, bits);
86 }
87
88 #define tg3_flag(tp, flag) \
89 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
90 #define tg3_flag_set(tp, flag) \
91 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
92 #define tg3_flag_clear(tp, flag) \
93 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
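
/* Illustrative sketch, not part of the driver: how the accessors above are
 * used.  tg3_example_flag_usage() is a hypothetical name; the macros paste
 * the short flag name onto TG3_FLAG_ and operate atomically on the
 * tp->tg3_flags bitmap through test_bit()/set_bit()/clear_bit().
 */
static inline bool tg3_example_flag_usage(struct tg3 *tp)
{
	tg3_flag_set(tp, TAGGED_STATUS);	/* sets TG3_FLAG_TAGGED_STATUS */
	if (tg3_flag(tp, TAGGED_STATUS))	/* tests the same bit */
		tg3_flag_clear(tp, TAGGED_STATUS);
	return tg3_flag(tp, ENABLE_APE);
}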
94
95 #define DRV_MODULE_NAME "tg3"
96 #define TG3_MAJ_NUM 3
97 #define TG3_MIN_NUM 132
98 #define DRV_MODULE_VERSION \
99 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
100 #define DRV_MODULE_RELDATE "May 21, 2013"
101
102 #define RESET_KIND_SHUTDOWN 0
103 #define RESET_KIND_INIT 1
104 #define RESET_KIND_SUSPEND 2
105
106 #define TG3_DEF_RX_MODE 0
107 #define TG3_DEF_TX_MODE 0
108 #define TG3_DEF_MSG_ENABLE \
109 (NETIF_MSG_DRV | \
110 NETIF_MSG_PROBE | \
111 NETIF_MSG_LINK | \
112 NETIF_MSG_TIMER | \
113 NETIF_MSG_IFDOWN | \
114 NETIF_MSG_IFUP | \
115 NETIF_MSG_RX_ERR | \
116 NETIF_MSG_TX_ERR)
117
118 #define TG3_GRC_LCLCTL_PWRSW_DELAY 100
119
120 /* length of time before we decide the hardware is borked,
121 * and dev->tx_timeout() should be called to fix the problem
122 */
123
124 #define TG3_TX_TIMEOUT (5 * HZ)
125
126 /* hardware minimum and maximum for a single frame's data payload */
127 #define TG3_MIN_MTU 60
128 #define TG3_MAX_MTU(tp) \
129 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
130
131 /* These numbers seem to be hard coded in the NIC firmware somehow.
132 * You can't change the ring sizes, but you can change where you place
133 * them in the NIC onboard memory.
134 */
135 #define TG3_RX_STD_RING_SIZE(tp) \
136 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
137 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
138 #define TG3_DEF_RX_RING_PENDING 200
139 #define TG3_RX_JMB_RING_SIZE(tp) \
140 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
141 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
142 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
143
144 /* Do not place this n-ring entries value into the tp struct itself,
145 * we really want to expose these constants to GCC so that modulo et
146 * al. operations are done with shifts and masks instead of with
147 * hw multiply/modulo instructions. Another solution would be to
148 * replace things like '% foo' with '& (foo - 1)'.
149 */
150
151 #define TG3_TX_RING_SIZE 512
152 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
153
154 #define TG3_RX_STD_RING_BYTES(tp) \
155 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
156 #define TG3_RX_JMB_RING_BYTES(tp) \
157 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
158 #define TG3_RX_RCB_RING_BYTES(tp) \
159 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
160 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
161 TG3_TX_RING_SIZE)
162 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
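
/* Worked example (editorial note): TG3_TX_RING_SIZE is 512, a power of two,
 * so the '& (foo - 1)' trick described above applies directly.
 * NEXT_TX(510) == 511, and NEXT_TX(511) == (512 & 511) == 0, i.e. the
 * producer index wraps around without any hardware divide or modulo.
 */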
163
164 #define TG3_DMA_BYTE_ENAB 64
165
166 #define TG3_RX_STD_DMA_SZ 1536
167 #define TG3_RX_JMB_DMA_SZ 9046
168
169 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
170
171 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
172 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
173
174 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
175 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
176
177 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
178 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
179
180 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
181 * that are at least dword aligned when used in PCIX mode. The driver
182 * works around this bug by double copying the packet. This workaround
183 * is built into the normal double copy length check for efficiency.
184 *
185 * However, the double copy is only necessary on those architectures
186 * where unaligned memory accesses are inefficient. For those architectures
187 * where unaligned memory accesses incur little penalty, we can reintegrate
188 * the 5701 in the normal rx path. Doing so saves a device structure
189 * dereference by hardcoding the double copy threshold in place.
190 */
191 #define TG3_RX_COPY_THRESHOLD 256
192 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
193 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
194 #else
195 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
196 #endif
197
198 #if (NET_IP_ALIGN != 0)
199 #define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
200 #else
201 #define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
202 #endif
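
/* Illustrative sketch (hypothetical helper, not in the driver): the rx path
 * double-copies a packet into a freshly allocated skb whenever its length is
 * at or below TG3_RX_COPY_THRESH(tp), which also hides the 5701 PCIX
 * alignment bug described above; larger packets are handed up in the
 * original DMA buffer.
 */
static inline bool tg3_example_should_copy_rx(struct tg3 *tp, u32 len)
{
	return len <= TG3_RX_COPY_THRESH(tp);
}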
203
204 /* minimum number of free TX descriptors required to wake up TX process */
205 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
206 #define TG3_TX_BD_DMA_MAX_2K 2048
207 #define TG3_TX_BD_DMA_MAX_4K 4096
208
209 #define TG3_RAW_IP_ALIGN 2
210
211 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
212 #define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
213
214 #define FIRMWARE_TG3 "tigon/tg3.bin"
215 #define FIRMWARE_TG357766 "tigon/tg357766.bin"
216 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
217 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
218
219 static char version[] =
220 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
221
222 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
223 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
224 MODULE_LICENSE("GPL");
225 MODULE_VERSION(DRV_MODULE_VERSION);
226 MODULE_FIRMWARE(FIRMWARE_TG3);
227 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
228 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
229
230 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
231 module_param(tg3_debug, int, 0);
232 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
233
234 #define TG3_DRV_DATA_FLAG_10_100_ONLY 0x0001
235 #define TG3_DRV_DATA_FLAG_5705_10_100 0x0002
236
237 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
257 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
258 TG3_DRV_DATA_FLAG_5705_10_100},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
260 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
261 TG3_DRV_DATA_FLAG_5705_10_100},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
264 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
265 TG3_DRV_DATA_FLAG_5705_10_100},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
272 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
278 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
286 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
287 PCI_VENDOR_ID_LENOVO,
288 TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
289 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
291 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
292 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
294 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
295 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
297 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
298 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
299 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
300 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
301 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
302 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
303 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
304 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
305 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
306 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
307 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
308 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
309 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
310 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
311 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
312 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
313 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
314 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
315 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
316 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
317 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
318 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
319 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
320 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
321 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
322 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
323 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
324 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
325 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
326 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
327 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
328 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
329 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
330 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
331 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
332 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
333 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
334 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
335 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
336 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
337 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
338 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
339 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
340 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
341 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
342 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
343 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
344 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
345 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
346 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
347 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
348 {}
349 };
350
351 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
352
353 static const struct {
354 const char string[ETH_GSTRING_LEN];
355 } ethtool_stats_keys[] = {
356 { "rx_octets" },
357 { "rx_fragments" },
358 { "rx_ucast_packets" },
359 { "rx_mcast_packets" },
360 { "rx_bcast_packets" },
361 { "rx_fcs_errors" },
362 { "rx_align_errors" },
363 { "rx_xon_pause_rcvd" },
364 { "rx_xoff_pause_rcvd" },
365 { "rx_mac_ctrl_rcvd" },
366 { "rx_xoff_entered" },
367 { "rx_frame_too_long_errors" },
368 { "rx_jabbers" },
369 { "rx_undersize_packets" },
370 { "rx_in_length_errors" },
371 { "rx_out_length_errors" },
372 { "rx_64_or_less_octet_packets" },
373 { "rx_65_to_127_octet_packets" },
374 { "rx_128_to_255_octet_packets" },
375 { "rx_256_to_511_octet_packets" },
376 { "rx_512_to_1023_octet_packets" },
377 { "rx_1024_to_1522_octet_packets" },
378 { "rx_1523_to_2047_octet_packets" },
379 { "rx_2048_to_4095_octet_packets" },
380 { "rx_4096_to_8191_octet_packets" },
381 { "rx_8192_to_9022_octet_packets" },
382
383 { "tx_octets" },
384 { "tx_collisions" },
385
386 { "tx_xon_sent" },
387 { "tx_xoff_sent" },
388 { "tx_flow_control" },
389 { "tx_mac_errors" },
390 { "tx_single_collisions" },
391 { "tx_mult_collisions" },
392 { "tx_deferred" },
393 { "tx_excessive_collisions" },
394 { "tx_late_collisions" },
395 { "tx_collide_2times" },
396 { "tx_collide_3times" },
397 { "tx_collide_4times" },
398 { "tx_collide_5times" },
399 { "tx_collide_6times" },
400 { "tx_collide_7times" },
401 { "tx_collide_8times" },
402 { "tx_collide_9times" },
403 { "tx_collide_10times" },
404 { "tx_collide_11times" },
405 { "tx_collide_12times" },
406 { "tx_collide_13times" },
407 { "tx_collide_14times" },
408 { "tx_collide_15times" },
409 { "tx_ucast_packets" },
410 { "tx_mcast_packets" },
411 { "tx_bcast_packets" },
412 { "tx_carrier_sense_errors" },
413 { "tx_discards" },
414 { "tx_errors" },
415
416 { "dma_writeq_full" },
417 { "dma_write_prioq_full" },
418 { "rxbds_empty" },
419 { "rx_discards" },
420 { "rx_errors" },
421 { "rx_threshold_hit" },
422
423 { "dma_readq_full" },
424 { "dma_read_prioq_full" },
425 { "tx_comp_queue_full" },
426
427 { "ring_set_send_prod_index" },
428 { "ring_status_update" },
429 { "nic_irqs" },
430 { "nic_avoided_irqs" },
431 { "nic_tx_threshold_hit" },
432
433 { "mbuf_lwm_thresh_hit" },
434 };
435
436 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
437 #define TG3_NVRAM_TEST 0
438 #define TG3_LINK_TEST 1
439 #define TG3_REGISTER_TEST 2
440 #define TG3_MEMORY_TEST 3
441 #define TG3_MAC_LOOPB_TEST 4
442 #define TG3_PHY_LOOPB_TEST 5
443 #define TG3_EXT_LOOPB_TEST 6
444 #define TG3_INTERRUPT_TEST 7
445
446
447 static const struct {
448 const char string[ETH_GSTRING_LEN];
449 } ethtool_test_keys[] = {
450 [TG3_NVRAM_TEST] = { "nvram test (online) " },
451 [TG3_LINK_TEST] = { "link test (online) " },
452 [TG3_REGISTER_TEST] = { "register test (offline)" },
453 [TG3_MEMORY_TEST] = { "memory test (offline)" },
454 [TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" },
455 [TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" },
456 [TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" },
457 [TG3_INTERRUPT_TEST] = { "interrupt test (offline)" },
458 };
459
460 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
461
462
463 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
464 {
465 writel(val, tp->regs + off);
466 }
467
468 static u32 tg3_read32(struct tg3 *tp, u32 off)
469 {
470 return readl(tp->regs + off);
471 }
472
473 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
474 {
475 writel(val, tp->aperegs + off);
476 }
477
478 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
479 {
480 return readl(tp->aperegs + off);
481 }
482
483 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
484 {
485 unsigned long flags;
486
487 spin_lock_irqsave(&tp->indirect_lock, flags);
488 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
489 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
490 spin_unlock_irqrestore(&tp->indirect_lock, flags);
491 }
492
493 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
494 {
495 writel(val, tp->regs + off);
496 readl(tp->regs + off);
497 }
498
499 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
500 {
501 unsigned long flags;
502 u32 val;
503
504 spin_lock_irqsave(&tp->indirect_lock, flags);
505 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
506 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
507 spin_unlock_irqrestore(&tp->indirect_lock, flags);
508 return val;
509 }
510
511 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
512 {
513 unsigned long flags;
514
515 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
516 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
517 TG3_64BIT_REG_LOW, val);
518 return;
519 }
520 if (off == TG3_RX_STD_PROD_IDX_REG) {
521 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
522 TG3_64BIT_REG_LOW, val);
523 return;
524 }
525
526 spin_lock_irqsave(&tp->indirect_lock, flags);
527 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
528 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
529 spin_unlock_irqrestore(&tp->indirect_lock, flags);
530
531 /* In indirect mode when disabling interrupts, we also need
532 * to clear the interrupt bit in the GRC local ctrl register.
533 */
534 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
535 (val == 0x1)) {
536 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
537 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
538 }
539 }
540
541 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
542 {
543 unsigned long flags;
544 u32 val;
545
546 spin_lock_irqsave(&tp->indirect_lock, flags);
547 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
548 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
549 spin_unlock_irqrestore(&tp->indirect_lock, flags);
550 return val;
551 }
552
553 /* usec_wait specifies the wait time in usec when writing to certain registers
554 * where it is unsafe to read back the register without some delay.
555 * GRC_LOCAL_CTRL is one example, used when the GPIOs are toggled to switch power.
556 * TG3PCI_CLOCK_CTRL is another, used when the clock frequencies are changed.
557 */
558 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
559 {
560 if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
561 /* Non-posted methods */
562 tp->write32(tp, off, val);
563 else {
564 /* Posted method */
565 tg3_write32(tp, off, val);
566 if (usec_wait)
567 udelay(usec_wait);
568 tp->read32(tp, off);
569 }
570 /* Wait again after the read for the posted method to guarantee that
571 * the wait time is met.
572 */
573 if (usec_wait)
574 udelay(usec_wait);
575 }
576
577 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
578 {
579 tp->write32_mbox(tp, off, val);
580 if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
581 (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
582 !tg3_flag(tp, ICH_WORKAROUND)))
583 tp->read32_mbox(tp, off);
584 }
585
586 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
587 {
588 void __iomem *mbox = tp->regs + off;
589 writel(val, mbox);
590 if (tg3_flag(tp, TXD_MBOX_HWBUG))
591 writel(val, mbox);
592 if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
593 tg3_flag(tp, FLUSH_POSTED_WRITES))
594 readl(mbox);
595 }
596
597 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
598 {
599 return readl(tp->regs + off + GRCMBOX_BASE);
600 }
601
602 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
603 {
604 writel(val, tp->regs + off + GRCMBOX_BASE);
605 }
606
607 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
608 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
609 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
610 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
611 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
612
613 #define tw32(reg, val) tp->write32(tp, reg, val)
614 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
615 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
616 #define tr32(reg) tp->read32(tp, reg)
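
/* Usage sketch (editorial note): GRC_LOCAL_CTRL is one of the registers
 * named in the _tw32_flush() comment above, so GPIO power switching
 * elsewhere in this driver writes it through tw32_wait_f() with the
 * power-switch delay:
 *
 *	tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
 *		    TG3_GRC_LCLCTL_PWRSW_DELAY);
 */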
617
618 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
619 {
620 unsigned long flags;
621
622 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
623 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
624 return;
625
626 spin_lock_irqsave(&tp->indirect_lock, flags);
627 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
628 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
629 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
630
631 /* Always leave this as zero. */
632 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
633 } else {
634 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
635 tw32_f(TG3PCI_MEM_WIN_DATA, val);
636
637 /* Always leave this as zero. */
638 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
639 }
640 spin_unlock_irqrestore(&tp->indirect_lock, flags);
641 }
642
643 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
644 {
645 unsigned long flags;
646
647 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
648 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
649 *val = 0;
650 return;
651 }
652
653 spin_lock_irqsave(&tp->indirect_lock, flags);
654 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
655 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
656 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
657
658 /* Always leave this as zero. */
659 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
660 } else {
661 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
662 *val = tr32(TG3PCI_MEM_WIN_DATA);
663
664 /* Always leave this as zero. */
665 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
666 }
667 spin_unlock_irqrestore(&tp->indirect_lock, flags);
668 }
669
670 static void tg3_ape_lock_init(struct tg3 *tp)
671 {
672 int i;
673 u32 regbase, bit;
674
675 if (tg3_asic_rev(tp) == ASIC_REV_5761)
676 regbase = TG3_APE_LOCK_GRANT;
677 else
678 regbase = TG3_APE_PER_LOCK_GRANT;
679
680 /* Make sure the driver doesn't hold any stale locks. */
681 for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
682 switch (i) {
683 case TG3_APE_LOCK_PHY0:
684 case TG3_APE_LOCK_PHY1:
685 case TG3_APE_LOCK_PHY2:
686 case TG3_APE_LOCK_PHY3:
687 bit = APE_LOCK_GRANT_DRIVER;
688 break;
689 default:
690 if (!tp->pci_fn)
691 bit = APE_LOCK_GRANT_DRIVER;
692 else
693 bit = 1 << tp->pci_fn;
694 }
695 tg3_ape_write32(tp, regbase + 4 * i, bit);
696 }
697
698 }
699
700 static int tg3_ape_lock(struct tg3 *tp, int locknum)
701 {
702 int i, off;
703 int ret = 0;
704 u32 status, req, gnt, bit;
705
706 if (!tg3_flag(tp, ENABLE_APE))
707 return 0;
708
709 switch (locknum) {
710 case TG3_APE_LOCK_GPIO:
711 if (tg3_asic_rev(tp) == ASIC_REV_5761)
712 return 0;
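/* else: fallthru */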
713 case TG3_APE_LOCK_GRC:
714 case TG3_APE_LOCK_MEM:
715 if (!tp->pci_fn)
716 bit = APE_LOCK_REQ_DRIVER;
717 else
718 bit = 1 << tp->pci_fn;
719 break;
720 case TG3_APE_LOCK_PHY0:
721 case TG3_APE_LOCK_PHY1:
722 case TG3_APE_LOCK_PHY2:
723 case TG3_APE_LOCK_PHY3:
724 bit = APE_LOCK_REQ_DRIVER;
725 break;
726 default:
727 return -EINVAL;
728 }
729
730 if (tg3_asic_rev(tp) == ASIC_REV_5761) {
731 req = TG3_APE_LOCK_REQ;
732 gnt = TG3_APE_LOCK_GRANT;
733 } else {
734 req = TG3_APE_PER_LOCK_REQ;
735 gnt = TG3_APE_PER_LOCK_GRANT;
736 }
737
738 off = 4 * locknum;
739
740 tg3_ape_write32(tp, req + off, bit);
741
742 /* Wait for up to 1 millisecond to acquire lock. */
743 for (i = 0; i < 100; i++) {
744 status = tg3_ape_read32(tp, gnt + off);
745 if (status == bit)
746 break;
747 udelay(10);
748 }
749
750 if (status != bit) {
751 /* Revoke the lock request. */
752 tg3_ape_write32(tp, gnt + off, bit);
753 ret = -EBUSY;
754 }
755
756 return ret;
757 }
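
/* Usage sketch (editorial note): callers bracket accesses to APE-shared
 * resources with the request/grant protocol implemented above, e.g.:
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return -EBUSY;
 *	... touch APE shared memory ...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 */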
758
759 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
760 {
761 u32 gnt, bit;
762
763 if (!tg3_flag(tp, ENABLE_APE))
764 return;
765
766 switch (locknum) {
767 case TG3_APE_LOCK_GPIO:
768 if (tg3_asic_rev(tp) == ASIC_REV_5761)
769 return;
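/* else: fallthru */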
770 case TG3_APE_LOCK_GRC:
771 case TG3_APE_LOCK_MEM:
772 if (!tp->pci_fn)
773 bit = APE_LOCK_GRANT_DRIVER;
774 else
775 bit = 1 << tp->pci_fn;
776 break;
777 case TG3_APE_LOCK_PHY0:
778 case TG3_APE_LOCK_PHY1:
779 case TG3_APE_LOCK_PHY2:
780 case TG3_APE_LOCK_PHY3:
781 bit = APE_LOCK_GRANT_DRIVER;
782 break;
783 default:
784 return;
785 }
786
787 if (tg3_asic_rev(tp) == ASIC_REV_5761)
788 gnt = TG3_APE_LOCK_GRANT;
789 else
790 gnt = TG3_APE_PER_LOCK_GRANT;
791
792 tg3_ape_write32(tp, gnt + 4 * locknum, bit);
793 }
794
795 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
796 {
797 u32 apedata;
798
799 while (timeout_us) {
800 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
801 return -EBUSY;
802
803 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
804 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
805 break;
806
807 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
808
809 udelay(10);
810 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
811 }
812
813 return timeout_us ? 0 : -EBUSY;
814 }
815
816 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
817 {
818 u32 i, apedata;
819
820 for (i = 0; i < timeout_us / 10; i++) {
821 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
822
823 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
824 break;
825
826 udelay(10);
827 }
828
829 return i == timeout_us / 10;
830 }
831
832 static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
833 u32 len)
834 {
835 int err;
836 u32 i, bufoff, msgoff, maxlen, apedata;
837
838 if (!tg3_flag(tp, APE_HAS_NCSI))
839 return 0;
840
841 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
842 if (apedata != APE_SEG_SIG_MAGIC)
843 return -ENODEV;
844
845 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
846 if (!(apedata & APE_FW_STATUS_READY))
847 return -EAGAIN;
848
849 bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
850 TG3_APE_SHMEM_BASE;
851 msgoff = bufoff + 2 * sizeof(u32);
852 maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
853
854 while (len) {
855 u32 length;
856
857 /* Cap xfer sizes to scratchpad limits. */
858 length = (len > maxlen) ? maxlen : len;
859 len -= length;
860
861 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
862 if (!(apedata & APE_FW_STATUS_READY))
863 return -EAGAIN;
864
865 /* Wait for up to 1 msec for APE to service previous event. */
866 err = tg3_ape_event_lock(tp, 1000);
867 if (err)
868 return err;
869
870 apedata = APE_EVENT_STATUS_DRIVER_EVNT |
871 APE_EVENT_STATUS_SCRTCHPD_READ |
872 APE_EVENT_STATUS_EVENT_PENDING;
873 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
874
875 tg3_ape_write32(tp, bufoff, base_off);
876 tg3_ape_write32(tp, bufoff + sizeof(u32), length);
877
878 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
879 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
880
881 base_off += length;
882
883 if (tg3_ape_wait_for_event(tp, 30000))
884 return -EAGAIN;
885
886 for (i = 0; length; i += 4, length -= 4) {
887 u32 val = tg3_ape_read32(tp, msgoff + i);
888 memcpy(data, &val, sizeof(u32));
889 data++;
890 }
891 }
892
893 return 0;
894 }
895
896 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
897 {
898 int err;
899 u32 apedata;
900
901 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
902 if (apedata != APE_SEG_SIG_MAGIC)
903 return -EAGAIN;
904
905 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
906 if (!(apedata & APE_FW_STATUS_READY))
907 return -EAGAIN;
908
909 /* Wait for up to 1 millisecond for APE to service previous event. */
910 err = tg3_ape_event_lock(tp, 1000);
911 if (err)
912 return err;
913
914 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
915 event | APE_EVENT_STATUS_EVENT_PENDING);
916
917 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
918 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
919
920 return 0;
921 }
922
923 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
924 {
925 u32 event;
926 u32 apedata;
927
928 if (!tg3_flag(tp, ENABLE_APE))
929 return;
930
931 switch (kind) {
932 case RESET_KIND_INIT:
933 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
934 APE_HOST_SEG_SIG_MAGIC);
935 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
936 APE_HOST_SEG_LEN_MAGIC);
937 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
938 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
939 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
940 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
941 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
942 APE_HOST_BEHAV_NO_PHYLOCK);
943 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
944 TG3_APE_HOST_DRVR_STATE_START);
945
946 event = APE_EVENT_STATUS_STATE_START;
947 break;
948 case RESET_KIND_SHUTDOWN:
949 /* With the interface we are currently using,
950 * APE does not track driver state. Wiping
951 * out the HOST SEGMENT SIGNATURE forces
952 * the APE to assume OS absent status.
953 */
954 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
955
956 if (device_may_wakeup(&tp->pdev->dev) &&
957 tg3_flag(tp, WOL_ENABLE)) {
958 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
959 TG3_APE_HOST_WOL_SPEED_AUTO);
960 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
961 } else
962 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
963
964 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
965
966 event = APE_EVENT_STATUS_STATE_UNLOAD;
967 break;
968 case RESET_KIND_SUSPEND:
969 event = APE_EVENT_STATUS_STATE_SUSPEND;
970 break;
971 default:
972 return;
973 }
974
975 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
976
977 tg3_ape_send_event(tp, event);
978 }
979
980 static void tg3_disable_ints(struct tg3 *tp)
981 {
982 int i;
983
984 tw32(TG3PCI_MISC_HOST_CTRL,
985 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
986 for (i = 0; i < tp->irq_max; i++)
987 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
988 }
989
990 static void tg3_enable_ints(struct tg3 *tp)
991 {
992 int i;
993
994 tp->irq_sync = 0;
995 wmb();
996
997 tw32(TG3PCI_MISC_HOST_CTRL,
998 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
999
1000 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
1001 for (i = 0; i < tp->irq_cnt; i++) {
1002 struct tg3_napi *tnapi = &tp->napi[i];
1003
1004 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1005 if (tg3_flag(tp, 1SHOT_MSI))
1006 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1007
1008 tp->coal_now |= tnapi->coal_now;
1009 }
1010
1011 /* Force an initial interrupt */
1012 if (!tg3_flag(tp, TAGGED_STATUS) &&
1013 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1014 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1015 else
1016 tw32(HOSTCC_MODE, tp->coal_now);
1017
1018 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1019 }
1020
1021 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1022 {
1023 struct tg3 *tp = tnapi->tp;
1024 struct tg3_hw_status *sblk = tnapi->hw_status;
1025 unsigned int work_exists = 0;
1026
1027 /* check for phy events */
1028 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1029 if (sblk->status & SD_STATUS_LINK_CHG)
1030 work_exists = 1;
1031 }
1032
1033 /* check for TX work to do */
1034 if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1035 work_exists = 1;
1036
1037 /* check for RX work to do */
1038 if (tnapi->rx_rcb_prod_idx &&
1039 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1040 work_exists = 1;
1041
1042 return work_exists;
1043 }
1044
1045 /* tg3_int_reenable
1046 * similar to tg3_enable_ints, but it accurately determines whether there
1047 * is new work pending and can return without flushing the PIO write
1048 * which reenables interrupts.
1049 */
1050 static void tg3_int_reenable(struct tg3_napi *tnapi)
1051 {
1052 struct tg3 *tp = tnapi->tp;
1053
1054 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1055 mmiowb();
1056
1057 /* When doing tagged status, this work check is unnecessary.
1058 * The last_tag we write above tells the chip which piece of
1059 * work we've completed.
1060 */
1061 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1062 tw32(HOSTCC_MODE, tp->coalesce_mode |
1063 HOSTCC_MODE_ENABLE | tnapi->coal_now);
1064 }
1065
1066 static void tg3_switch_clocks(struct tg3 *tp)
1067 {
1068 u32 clock_ctrl;
1069 u32 orig_clock_ctrl;
1070
1071 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1072 return;
1073
1074 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1075
1076 orig_clock_ctrl = clock_ctrl;
1077 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1078 CLOCK_CTRL_CLKRUN_OENABLE |
1079 0x1f);
1080 tp->pci_clock_ctrl = clock_ctrl;
1081
1082 if (tg3_flag(tp, 5705_PLUS)) {
1083 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1084 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1085 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1086 }
1087 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1088 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1089 clock_ctrl |
1090 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1091 40);
1092 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1093 clock_ctrl | (CLOCK_CTRL_ALTCLK),
1094 40);
1095 }
1096 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1097 }
1098
1099 #define PHY_BUSY_LOOPS 5000
1100
1101 static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
1102 u32 *val)
1103 {
1104 u32 frame_val;
1105 unsigned int loops;
1106 int ret;
1107
1108 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1109 tw32_f(MAC_MI_MODE,
1110 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1111 udelay(80);
1112 }
1113
1114 tg3_ape_lock(tp, tp->phy_ape_lock);
1115
1116 *val = 0x0;
1117
1118 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1119 MI_COM_PHY_ADDR_MASK);
1120 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1121 MI_COM_REG_ADDR_MASK);
1122 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1123
1124 tw32_f(MAC_MI_COM, frame_val);
1125
1126 loops = PHY_BUSY_LOOPS;
1127 while (loops != 0) {
1128 udelay(10);
1129 frame_val = tr32(MAC_MI_COM);
1130
1131 if ((frame_val & MI_COM_BUSY) == 0) {
1132 udelay(5);
1133 frame_val = tr32(MAC_MI_COM);
1134 break;
1135 }
1136 loops -= 1;
1137 }
1138
1139 ret = -EBUSY;
1140 if (loops != 0) {
1141 *val = frame_val & MI_COM_DATA_MASK;
1142 ret = 0;
1143 }
1144
1145 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1146 tw32_f(MAC_MI_MODE, tp->mi_mode);
1147 udelay(80);
1148 }
1149
1150 tg3_ape_unlock(tp, tp->phy_ape_lock);
1151
1152 return ret;
1153 }
1154
1155 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1156 {
1157 return __tg3_readphy(tp, tp->phy_addr, reg, val);
1158 }
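
/* Illustrative sketch (hypothetical helper, not in the driver): a typical
 * MII read through the MAC's MI_COM state machine above, here polling the
 * standard BMSR register for the latched link-status bit.
 */
static inline bool tg3_example_phy_link_up(struct tg3 *tp)
{
	u32 bmsr;

	if (tg3_readphy(tp, MII_BMSR, &bmsr))
		return false;		/* MDIO access timed out */
	return bmsr & BMSR_LSTATUS;
}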
1159
1160 static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
1161 u32 val)
1162 {
1163 u32 frame_val;
1164 unsigned int loops;
1165 int ret;
1166
1167 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1168 (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1169 return 0;
1170
1171 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1172 tw32_f(MAC_MI_MODE,
1173 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1174 udelay(80);
1175 }
1176
1177 tg3_ape_lock(tp, tp->phy_ape_lock);
1178
1179 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1180 MI_COM_PHY_ADDR_MASK);
1181 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1182 MI_COM_REG_ADDR_MASK);
1183 frame_val |= (val & MI_COM_DATA_MASK);
1184 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1185
1186 tw32_f(MAC_MI_COM, frame_val);
1187
1188 loops = PHY_BUSY_LOOPS;
1189 while (loops != 0) {
1190 udelay(10);
1191 frame_val = tr32(MAC_MI_COM);
1192 if ((frame_val & MI_COM_BUSY) == 0) {
1193 udelay(5);
1194 frame_val = tr32(MAC_MI_COM);
1195 break;
1196 }
1197 loops -= 1;
1198 }
1199
1200 ret = -EBUSY;
1201 if (loops != 0)
1202 ret = 0;
1203
1204 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1205 tw32_f(MAC_MI_MODE, tp->mi_mode);
1206 udelay(80);
1207 }
1208
1209 tg3_ape_unlock(tp, tp->phy_ape_lock);
1210
1211 return ret;
1212 }
1213
1214 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1215 {
1216 return __tg3_writephy(tp, tp->phy_addr, reg, val);
1217 }
1218
1219 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1220 {
1221 int err;
1222
1223 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1224 if (err)
1225 goto done;
1226
1227 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1228 if (err)
1229 goto done;
1230
1231 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1232 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1233 if (err)
1234 goto done;
1235
1236 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1237
1238 done:
1239 return err;
1240 }
1241
1242 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1243 {
1244 int err;
1245
1246 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1247 if (err)
1248 goto done;
1249
1250 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1251 if (err)
1252 goto done;
1253
1254 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1255 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1256 if (err)
1257 goto done;
1258
1259 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1260
1261 done:
1262 return err;
1263 }
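
/* Editorial note: the two helpers above implement IEEE 802.3 clause-45
 * register access over a clause-22 PHY using the MMD shadow registers:
 * write the device address (devad) to MMD_CTRL, the register address to
 * MMD_ADDRESS, switch MMD_CTRL into no-post-increment data mode, and from
 * then on MMD_ADDRESS acts as the data port for the read or write.
 */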
1264
1265 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1266 {
1267 int err;
1268
1269 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1270 if (!err)
1271 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1272
1273 return err;
1274 }
1275
1276 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1277 {
1278 int err;
1279
1280 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1281 if (!err)
1282 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1283
1284 return err;
1285 }
1286
1287 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1288 {
1289 int err;
1290
1291 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1292 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1293 MII_TG3_AUXCTL_SHDWSEL_MISC);
1294 if (!err)
1295 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1296
1297 return err;
1298 }
1299
1300 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1301 {
1302 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1303 set |= MII_TG3_AUXCTL_MISC_WREN;
1304
1305 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1306 }
1307
1308 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1309 {
1310 u32 val;
1311 int err;
1312
1313 err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1314
1315 if (err)
1316 return err;
1317
1318 if (enable)
1319 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1320 else
1321 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1322
1323 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1324 val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1325
1326 return err;
1327 }
1328
1329 static int tg3_bmcr_reset(struct tg3 *tp)
1330 {
1331 u32 phy_control;
1332 int limit, err;
1333
1334 /* OK, reset it, and poll the BMCR_RESET bit until it
1335 * clears or we time out.
1336 */
1337 phy_control = BMCR_RESET;
1338 err = tg3_writephy(tp, MII_BMCR, phy_control);
1339 if (err != 0)
1340 return -EBUSY;
1341
1342 limit = 5000;
1343 while (limit--) {
1344 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1345 if (err != 0)
1346 return -EBUSY;
1347
1348 if ((phy_control & BMCR_RESET) == 0) {
1349 udelay(40);
1350 break;
1351 }
1352 udelay(10);
1353 }
1354 if (limit < 0)
1355 return -EBUSY;
1356
1357 return 0;
1358 }
1359
1360 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1361 {
1362 struct tg3 *tp = bp->priv;
1363 u32 val;
1364
1365 spin_lock_bh(&tp->lock);
1366
1367 if (tg3_readphy(tp, reg, &val))
1368 val = -EIO;
1369
1370 spin_unlock_bh(&tp->lock);
1371
1372 return val;
1373 }
1374
1375 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1376 {
1377 struct tg3 *tp = bp->priv;
1378 u32 ret = 0;
1379
1380 spin_lock_bh(&tp->lock);
1381
1382 if (tg3_writephy(tp, reg, val))
1383 ret = -EIO;
1384
1385 spin_unlock_bh(&tp->lock);
1386
1387 return ret;
1388 }
1389
1390 static int tg3_mdio_reset(struct mii_bus *bp)
1391 {
1392 return 0;
1393 }
1394
1395 static void tg3_mdio_config_5785(struct tg3 *tp)
1396 {
1397 u32 val;
1398 struct phy_device *phydev;
1399
1400 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1401 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1402 case PHY_ID_BCM50610:
1403 case PHY_ID_BCM50610M:
1404 val = MAC_PHYCFG2_50610_LED_MODES;
1405 break;
1406 case PHY_ID_BCMAC131:
1407 val = MAC_PHYCFG2_AC131_LED_MODES;
1408 break;
1409 case PHY_ID_RTL8211C:
1410 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1411 break;
1412 case PHY_ID_RTL8201E:
1413 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1414 break;
1415 default:
1416 return;
1417 }
1418
1419 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1420 tw32(MAC_PHYCFG2, val);
1421
1422 val = tr32(MAC_PHYCFG1);
1423 val &= ~(MAC_PHYCFG1_RGMII_INT |
1424 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1425 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1426 tw32(MAC_PHYCFG1, val);
1427
1428 return;
1429 }
1430
1431 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1432 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1433 MAC_PHYCFG2_FMODE_MASK_MASK |
1434 MAC_PHYCFG2_GMODE_MASK_MASK |
1435 MAC_PHYCFG2_ACT_MASK_MASK |
1436 MAC_PHYCFG2_QUAL_MASK_MASK |
1437 MAC_PHYCFG2_INBAND_ENABLE;
1438
1439 tw32(MAC_PHYCFG2, val);
1440
1441 val = tr32(MAC_PHYCFG1);
1442 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1443 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1444 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1445 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1446 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1447 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1448 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1449 }
1450 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1451 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1452 tw32(MAC_PHYCFG1, val);
1453
1454 val = tr32(MAC_EXT_RGMII_MODE);
1455 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1456 MAC_RGMII_MODE_RX_QUALITY |
1457 MAC_RGMII_MODE_RX_ACTIVITY |
1458 MAC_RGMII_MODE_RX_ENG_DET |
1459 MAC_RGMII_MODE_TX_ENABLE |
1460 MAC_RGMII_MODE_TX_LOWPWR |
1461 MAC_RGMII_MODE_TX_RESET);
1462 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1463 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1464 val |= MAC_RGMII_MODE_RX_INT_B |
1465 MAC_RGMII_MODE_RX_QUALITY |
1466 MAC_RGMII_MODE_RX_ACTIVITY |
1467 MAC_RGMII_MODE_RX_ENG_DET;
1468 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1469 val |= MAC_RGMII_MODE_TX_ENABLE |
1470 MAC_RGMII_MODE_TX_LOWPWR |
1471 MAC_RGMII_MODE_TX_RESET;
1472 }
1473 tw32(MAC_EXT_RGMII_MODE, val);
1474 }
1475
1476 static void tg3_mdio_start(struct tg3 *tp)
1477 {
1478 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1479 tw32_f(MAC_MI_MODE, tp->mi_mode);
1480 udelay(80);
1481
1482 if (tg3_flag(tp, MDIOBUS_INITED) &&
1483 tg3_asic_rev(tp) == ASIC_REV_5785)
1484 tg3_mdio_config_5785(tp);
1485 }
1486
1487 static int tg3_mdio_init(struct tg3 *tp)
1488 {
1489 int i;
1490 u32 reg;
1491 struct phy_device *phydev;
1492
1493 if (tg3_flag(tp, 5717_PLUS)) {
1494 u32 is_serdes;
1495
1496 tp->phy_addr = tp->pci_fn + 1;
1497
1498 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1499 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1500 else
1501 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1502 TG3_CPMU_PHY_STRAP_IS_SERDES;
1503 if (is_serdes)
1504 tp->phy_addr += 7;
1505 } else
1506 tp->phy_addr = TG3_PHY_MII_ADDR;
1507
1508 tg3_mdio_start(tp);
1509
1510 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1511 return 0;
1512
1513 tp->mdio_bus = mdiobus_alloc();
1514 if (tp->mdio_bus == NULL)
1515 return -ENOMEM;
1516
1517 tp->mdio_bus->name = "tg3 mdio bus";
1518 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1519 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1520 tp->mdio_bus->priv = tp;
1521 tp->mdio_bus->parent = &tp->pdev->dev;
1522 tp->mdio_bus->read = &tg3_mdio_read;
1523 tp->mdio_bus->write = &tg3_mdio_write;
1524 tp->mdio_bus->reset = &tg3_mdio_reset;
1525 tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1526 tp->mdio_bus->irq = &tp->mdio_irq[0];
1527
1528 for (i = 0; i < PHY_MAX_ADDR; i++)
1529 tp->mdio_bus->irq[i] = PHY_POLL;
1530
1531 /* The bus registration will look for all the PHYs on the mdio bus.
1532 * Unfortunately, it does not ensure the PHY is powered up before
1533 * accessing the PHY ID registers. A chip reset is the
1534 * quickest way to bring the device back to an operational state.
1535 */
1536 if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1537 tg3_bmcr_reset(tp);
1538
1539 i = mdiobus_register(tp->mdio_bus);
1540 if (i) {
1541 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1542 mdiobus_free(tp->mdio_bus);
1543 return i;
1544 }
1545
1546 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1547
1548 if (!phydev || !phydev->drv) {
1549 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1550 mdiobus_unregister(tp->mdio_bus);
1551 mdiobus_free(tp->mdio_bus);
1552 return -ENODEV;
1553 }
1554
1555 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1556 case PHY_ID_BCM57780:
1557 phydev->interface = PHY_INTERFACE_MODE_GMII;
1558 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1559 break;
1560 case PHY_ID_BCM50610:
1561 case PHY_ID_BCM50610M:
1562 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1563 PHY_BRCM_RX_REFCLK_UNUSED |
1564 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1565 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1566 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1567 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1568 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1569 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1570 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1571 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1572 /* fallthru */
1573 case PHY_ID_RTL8211C:
1574 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1575 break;
1576 case PHY_ID_RTL8201E:
1577 case PHY_ID_BCMAC131:
1578 phydev->interface = PHY_INTERFACE_MODE_MII;
1579 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1580 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1581 break;
1582 }
1583
1584 tg3_flag_set(tp, MDIOBUS_INITED);
1585
1586 if (tg3_asic_rev(tp) == ASIC_REV_5785)
1587 tg3_mdio_config_5785(tp);
1588
1589 return 0;
1590 }
1591
1592 static void tg3_mdio_fini(struct tg3 *tp)
1593 {
1594 if (tg3_flag(tp, MDIOBUS_INITED)) {
1595 tg3_flag_clear(tp, MDIOBUS_INITED);
1596 mdiobus_unregister(tp->mdio_bus);
1597 mdiobus_free(tp->mdio_bus);
1598 }
1599 }
1600
1601 /* tp->lock is held. */
1602 static inline void tg3_generate_fw_event(struct tg3 *tp)
1603 {
1604 u32 val;
1605
1606 val = tr32(GRC_RX_CPU_EVENT);
1607 val |= GRC_RX_CPU_DRIVER_EVENT;
1608 tw32_f(GRC_RX_CPU_EVENT, val);
1609
1610 tp->last_event_jiffies = jiffies;
1611 }
1612
1613 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1614
1615 /* tp->lock is held. */
1616 static void tg3_wait_for_event_ack(struct tg3 *tp)
1617 {
1618 int i;
1619 unsigned int delay_cnt;
1620 long time_remain;
1621
1622 /* If enough time has passed, no wait is necessary. */
1623 time_remain = (long)(tp->last_event_jiffies + 1 +
1624 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1625 (long)jiffies;
1626 if (time_remain < 0)
1627 return;
1628
1629 /* Check if we can shorten the wait time. */
1630 delay_cnt = jiffies_to_usecs(time_remain);
1631 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1632 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1633 delay_cnt = (delay_cnt >> 3) + 1;
1634
1635 for (i = 0; i < delay_cnt; i++) {
1636 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1637 break;
1638 udelay(8);
1639 }
1640 }
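
/* Worked example (editorial note) for the loop above: with ~2000 usec of
 * the firmware event window remaining, delay_cnt = (2000 >> 3) + 1 = 251,
 * so the loop polls every 8 usec for at most ~2008 usec -- just past the
 * remaining window -- while exiting early as soon as the RX CPU clears
 * GRC_RX_CPU_DRIVER_EVENT.
 */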
1641
1642 /* tp->lock is held. */
1643 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1644 {
1645 u32 reg, val;
1646
1647 val = 0;
1648 if (!tg3_readphy(tp, MII_BMCR, &reg))
1649 val = reg << 16;
1650 if (!tg3_readphy(tp, MII_BMSR, &reg))
1651 val |= (reg & 0xffff);
1652 *data++ = val;
1653
1654 val = 0;
1655 if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1656 val = reg << 16;
1657 if (!tg3_readphy(tp, MII_LPA, &reg))
1658 val |= (reg & 0xffff);
1659 *data++ = val;
1660
1661 val = 0;
1662 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1663 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1664 val = reg << 16;
1665 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1666 val |= (reg & 0xffff);
1667 }
1668 *data++ = val;
1669
1670 if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1671 val = reg << 16;
1672 else
1673 val = 0;
1674 *data++ = val;
1675 }
1676
1677 /* tp->lock is held. */
1678 static void tg3_ump_link_report(struct tg3 *tp)
1679 {
1680 u32 data[4];
1681
1682 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1683 return;
1684
1685 tg3_phy_gather_ump_data(tp, data);
1686
1687 tg3_wait_for_event_ack(tp);
1688
1689 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1690 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1691 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1692 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1693 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1694 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1695
1696 tg3_generate_fw_event(tp);
1697 }
1698
1699 /* tp->lock is held. */
1700 static void tg3_stop_fw(struct tg3 *tp)
1701 {
1702 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1703 /* Wait for RX cpu to ACK the previous event. */
1704 tg3_wait_for_event_ack(tp);
1705
1706 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1707
1708 tg3_generate_fw_event(tp);
1709
1710 /* Wait for RX cpu to ACK this event. */
1711 tg3_wait_for_event_ack(tp);
1712 }
1713 }
1714
1715 /* tp->lock is held. */
1716 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1717 {
1718 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1719 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1720
1721 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1722 switch (kind) {
1723 case RESET_KIND_INIT:
1724 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1725 DRV_STATE_START);
1726 break;
1727
1728 case RESET_KIND_SHUTDOWN:
1729 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1730 DRV_STATE_UNLOAD);
1731 break;
1732
1733 case RESET_KIND_SUSPEND:
1734 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1735 DRV_STATE_SUSPEND);
1736 break;
1737
1738 default:
1739 break;
1740 }
1741 }
1742
1743 if (kind == RESET_KIND_INIT ||
1744 kind == RESET_KIND_SUSPEND)
1745 tg3_ape_driver_state_change(tp, kind);
1746 }
1747
1748 /* tp->lock is held. */
1749 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1750 {
1751 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1752 switch (kind) {
1753 case RESET_KIND_INIT:
1754 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1755 DRV_STATE_START_DONE);
1756 break;
1757
1758 case RESET_KIND_SHUTDOWN:
1759 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1760 DRV_STATE_UNLOAD_DONE);
1761 break;
1762
1763 default:
1764 break;
1765 }
1766 }
1767
1768 if (kind == RESET_KIND_SHUTDOWN)
1769 tg3_ape_driver_state_change(tp, kind);
1770 }
1771
1772 /* tp->lock is held. */
1773 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1774 {
1775 if (tg3_flag(tp, ENABLE_ASF)) {
1776 switch (kind) {
1777 case RESET_KIND_INIT:
1778 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1779 DRV_STATE_START);
1780 break;
1781
1782 case RESET_KIND_SHUTDOWN:
1783 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1784 DRV_STATE_UNLOAD);
1785 break;
1786
1787 case RESET_KIND_SUSPEND:
1788 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1789 DRV_STATE_SUSPEND);
1790 break;
1791
1792 default:
1793 break;
1794 }
1795 }
1796 }
1797
1798 static int tg3_poll_fw(struct tg3 *tp)
1799 {
1800 int i;
1801 u32 val;
1802
1803 if (tg3_flag(tp, IS_SSB_CORE)) {
1804 /* We don't use firmware. */
1805 return 0;
1806 }
1807
1808 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1809 /* Wait up to 20ms for init done. */
1810 for (i = 0; i < 200; i++) {
1811 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1812 return 0;
1813 udelay(100);
1814 }
1815 return -ENODEV;
1816 }
1817
1818 /* Wait for firmware initialization to complete. */
1819 for (i = 0; i < 100000; i++) {
1820 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1821 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1822 break;
1823 udelay(10);
1824 }
1825
1826 /* Chip might not be fitted with firmware. Some Sun onboard
1827 * parts are configured like that. So don't signal the timeout
1828 * of the above loop as an error, but do report the lack of
1829 * running firmware once.
1830 */
1831 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1832 tg3_flag_set(tp, NO_FWARE_REPORTED);
1833
1834 netdev_info(tp->dev, "No firmware running\n");
1835 }
1836
1837 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1838 /* The 57765 A0 needs a little more
1839 * time to do some important work.
1840 */
1841 mdelay(10);
1842 }
1843
1844 return 0;
1845 }
1846
1847 static void tg3_link_report(struct tg3 *tp)
1848 {
1849 if (!netif_carrier_ok(tp->dev)) {
1850 netif_info(tp, link, tp->dev, "Link is down\n");
1851 tg3_ump_link_report(tp);
1852 } else if (netif_msg_link(tp)) {
1853 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1854 (tp->link_config.active_speed == SPEED_1000 ?
1855 1000 :
1856 (tp->link_config.active_speed == SPEED_100 ?
1857 100 : 10)),
1858 (tp->link_config.active_duplex == DUPLEX_FULL ?
1859 "full" : "half"));
1860
1861 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1862 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1863 "on" : "off",
1864 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1865 "on" : "off");
1866
1867 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1868 netdev_info(tp->dev, "EEE is %s\n",
1869 tp->setlpicnt ? "enabled" : "disabled");
1870
1871 tg3_ump_link_report(tp);
1872 }
1873
1874 tp->link_up = netif_carrier_ok(tp->dev);
1875 }
1876
1877 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1878 {
1879 u32 flowctrl = 0;
1880
1881 if (adv & ADVERTISE_PAUSE_CAP) {
1882 flowctrl |= FLOW_CTRL_RX;
1883 if (!(adv & ADVERTISE_PAUSE_ASYM))
1884 flowctrl |= FLOW_CTRL_TX;
1885 } else if (adv & ADVERTISE_PAUSE_ASYM)
1886 flowctrl |= FLOW_CTRL_TX;
1887
1888 return flowctrl;
1889 }
1890
1891 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1892 {
1893 u16 miireg;
1894
1895 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1896 miireg = ADVERTISE_1000XPAUSE;
1897 else if (flow_ctrl & FLOW_CTRL_TX)
1898 miireg = ADVERTISE_1000XPSE_ASYM;
1899 else if (flow_ctrl & FLOW_CTRL_RX)
1900 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1901 else
1902 miireg = 0;
1903
1904 return miireg;
1905 }
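/* Quick reference for the encoding above (it matches the generic
* mii_advertise_flowctrl() mapping used for copper):
*
*   FLOW_CTRL_TX | FLOW_CTRL_RX  ->  ADVERTISE_1000XPAUSE
*   FLOW_CTRL_TX only            ->  ADVERTISE_1000XPSE_ASYM
*   FLOW_CTRL_RX only            ->  ADVERTISE_1000XPAUSE |
*                                    ADVERTISE_1000XPSE_ASYM
*   neither                      ->  0
*
* The RX-only case advertises both bits so the link partner may
* resolve to either symmetric or asymmetric pause.
*/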
1906
1907 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1908 {
1909 u32 flowctrl = 0;
1910
1911 if (adv & ADVERTISE_1000XPAUSE) {
1912 flowctrl |= FLOW_CTRL_RX;
1913 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1914 flowctrl |= FLOW_CTRL_TX;
1915 } else if (adv & ADVERTISE_1000XPSE_ASYM)
1916 flowctrl |= FLOW_CTRL_TX;
1917
1918 return flowctrl;
1919 }
1920
1921 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1922 {
1923 u8 cap = 0;
1924
1925 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1926 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1927 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1928 if (lcladv & ADVERTISE_1000XPAUSE)
1929 cap = FLOW_CTRL_RX;
1930 if (rmtadv & ADVERTISE_1000XPAUSE)
1931 cap = FLOW_CTRL_TX;
1932 }
1933
1934 return cap;
1935 }
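/* Pause resolution table for the function above (cf. IEEE 802.3
* annex 28B.3), with PAUSE = ADVERTISE_1000XPAUSE and
* ASYM = ADVERTISE_1000XPSE_ASYM:
*
*   local adv      remote adv     resolved cap
*   PAUSE          PAUSE          FLOW_CTRL_TX | FLOW_CTRL_RX
*   PAUSE | ASYM   ASYM           FLOW_CTRL_RX
*   ASYM           PAUSE | ASYM   FLOW_CTRL_TX
*   anything else  -              0
*/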
1936
1937 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1938 {
1939 u8 autoneg;
1940 u8 flowctrl = 0;
1941 u32 old_rx_mode = tp->rx_mode;
1942 u32 old_tx_mode = tp->tx_mode;
1943
1944 if (tg3_flag(tp, USE_PHYLIB))
1945 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1946 else
1947 autoneg = tp->link_config.autoneg;
1948
1949 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1950 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1951 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1952 else
1953 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1954 } else
1955 flowctrl = tp->link_config.flowctrl;
1956
1957 tp->link_config.active_flowctrl = flowctrl;
1958
1959 if (flowctrl & FLOW_CTRL_RX)
1960 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1961 else
1962 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1963
1964 if (old_rx_mode != tp->rx_mode)
1965 tw32_f(MAC_RX_MODE, tp->rx_mode);
1966
1967 if (flowctrl & FLOW_CTRL_TX)
1968 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1969 else
1970 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1971
1972 if (old_tx_mode != tp->tx_mode)
1973 tw32_f(MAC_TX_MODE, tp->tx_mode);
1974 }
1975
1976 static void tg3_adjust_link(struct net_device *dev)
1977 {
1978 u8 oldflowctrl, linkmesg = 0;
1979 u32 mac_mode, lcl_adv, rmt_adv;
1980 struct tg3 *tp = netdev_priv(dev);
1981 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1982
1983 spin_lock_bh(&tp->lock);
1984
1985 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1986 MAC_MODE_HALF_DUPLEX);
1987
1988 oldflowctrl = tp->link_config.active_flowctrl;
1989
1990 if (phydev->link) {
1991 lcl_adv = 0;
1992 rmt_adv = 0;
1993
1994 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1995 mac_mode |= MAC_MODE_PORT_MODE_MII;
1996 else if (phydev->speed == SPEED_1000 ||
1997 tg3_asic_rev(tp) != ASIC_REV_5785)
1998 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1999 else
2000 mac_mode |= MAC_MODE_PORT_MODE_MII;
2001
2002 if (phydev->duplex == DUPLEX_HALF)
2003 mac_mode |= MAC_MODE_HALF_DUPLEX;
2004 else {
2005 lcl_adv = mii_advertise_flowctrl(
2006 tp->link_config.flowctrl);
2007
2008 if (phydev->pause)
2009 rmt_adv = LPA_PAUSE_CAP;
2010 if (phydev->asym_pause)
2011 rmt_adv |= LPA_PAUSE_ASYM;
2012 }
2013
2014 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2015 } else
2016 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2017
2018 if (mac_mode != tp->mac_mode) {
2019 tp->mac_mode = mac_mode;
2020 tw32_f(MAC_MODE, tp->mac_mode);
2021 udelay(40);
2022 }
2023
2024 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2025 if (phydev->speed == SPEED_10)
2026 tw32(MAC_MI_STAT,
2027 MAC_MI_STAT_10MBPS_MODE |
2028 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2029 else
2030 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2031 }
2032
2033 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2034 tw32(MAC_TX_LENGTHS,
2035 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2036 (6 << TX_LENGTHS_IPG_SHIFT) |
2037 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2038 else
2039 tw32(MAC_TX_LENGTHS,
2040 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2041 (6 << TX_LENGTHS_IPG_SHIFT) |
2042 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2043
2044 if (phydev->link != tp->old_link ||
2045 phydev->speed != tp->link_config.active_speed ||
2046 phydev->duplex != tp->link_config.active_duplex ||
2047 oldflowctrl != tp->link_config.active_flowctrl)
2048 linkmesg = 1;
2049
2050 tp->old_link = phydev->link;
2051 tp->link_config.active_speed = phydev->speed;
2052 tp->link_config.active_duplex = phydev->duplex;
2053
2054 spin_unlock_bh(&tp->lock);
2055
2056 if (linkmesg)
2057 tg3_link_report(tp);
2058 }
2059
2060 static int tg3_phy_init(struct tg3 *tp)
2061 {
2062 struct phy_device *phydev;
2063
2064 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2065 return 0;
2066
2067 /* Bring the PHY back to a known state. */
2068 tg3_bmcr_reset(tp);
2069
2070 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2071
2072 /* Attach the MAC to the PHY. */
2073 phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2074 tg3_adjust_link, phydev->interface);
2075 if (IS_ERR(phydev)) {
2076 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2077 return PTR_ERR(phydev);
2078 }
2079
2080 /* Mask with MAC supported features. */
2081 switch (phydev->interface) {
2082 case PHY_INTERFACE_MODE_GMII:
2083 case PHY_INTERFACE_MODE_RGMII:
2084 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2085 phydev->supported &= (PHY_GBIT_FEATURES |
2086 SUPPORTED_Pause |
2087 SUPPORTED_Asym_Pause);
2088 break;
2089 }
2090 /* fallthru */
2091 case PHY_INTERFACE_MODE_MII:
2092 phydev->supported &= (PHY_BASIC_FEATURES |
2093 SUPPORTED_Pause |
2094 SUPPORTED_Asym_Pause);
2095 break;
2096 default:
2097 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2098 return -EINVAL;
2099 }
2100
2101 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2102
2103 phydev->advertising = phydev->supported;
2104
2105 return 0;
2106 }
2107
2108 static void tg3_phy_start(struct tg3 *tp)
2109 {
2110 struct phy_device *phydev;
2111
2112 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2113 return;
2114
2115 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2116
2117 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2118 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2119 phydev->speed = tp->link_config.speed;
2120 phydev->duplex = tp->link_config.duplex;
2121 phydev->autoneg = tp->link_config.autoneg;
2122 phydev->advertising = tp->link_config.advertising;
2123 }
2124
2125 phy_start(phydev);
2126
2127 phy_start_aneg(phydev);
2128 }
2129
2130 static void tg3_phy_stop(struct tg3 *tp)
2131 {
2132 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2133 return;
2134
2135 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2136 }
2137
2138 static void tg3_phy_fini(struct tg3 *tp)
2139 {
2140 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2141 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2142 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2143 }
2144 }
2145
2146 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2147 {
2148 int err;
2149 u32 val;
2150
2151 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2152 return 0;
2153
2154 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2155 /* Cannot do read-modify-write on 5401 */
2156 err = tg3_phy_auxctl_write(tp,
2157 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2158 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2159 0x4c20);
2160 goto done;
2161 }
2162
2163 err = tg3_phy_auxctl_read(tp,
2164 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2165 if (err)
2166 return err;
2167
2168 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2169 err = tg3_phy_auxctl_write(tp,
2170 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2171
2172 done:
2173 return err;
2174 }
2175
2176 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2177 {
2178 u32 phytest;
2179
2180 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2181 u32 phy;
2182
2183 tg3_writephy(tp, MII_TG3_FET_TEST,
2184 phytest | MII_TG3_FET_SHADOW_EN);
2185 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2186 if (enable)
2187 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2188 else
2189 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2190 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2191 }
2192 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2193 }
2194 }
2195
2196 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2197 {
2198 u32 reg;
2199
2200 if (!tg3_flag(tp, 5705_PLUS) ||
2201 (tg3_flag(tp, 5717_PLUS) &&
2202 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2203 return;
2204
2205 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2206 tg3_phy_fet_toggle_apd(tp, enable);
2207 return;
2208 }
2209
2210 reg = MII_TG3_MISC_SHDW_WREN |
2211 MII_TG3_MISC_SHDW_SCR5_SEL |
2212 MII_TG3_MISC_SHDW_SCR5_LPED |
2213 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2214 MII_TG3_MISC_SHDW_SCR5_SDTL |
2215 MII_TG3_MISC_SHDW_SCR5_C125OE;
2216 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2217 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2218
2219 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2220
2222 reg = MII_TG3_MISC_SHDW_WREN |
2223 MII_TG3_MISC_SHDW_APD_SEL |
2224 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2225 if (enable)
2226 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2227
2228 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2229 }
2230
2231 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2232 {
2233 u32 phy;
2234
2235 if (!tg3_flag(tp, 5705_PLUS) ||
2236 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2237 return;
2238
2239 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2240 u32 ephy;
2241
2242 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2243 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2244
2245 tg3_writephy(tp, MII_TG3_FET_TEST,
2246 ephy | MII_TG3_FET_SHADOW_EN);
2247 if (!tg3_readphy(tp, reg, &phy)) {
2248 if (enable)
2249 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2250 else
2251 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2252 tg3_writephy(tp, reg, phy);
2253 }
2254 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2255 }
2256 } else {
2257 int ret;
2258
2259 ret = tg3_phy_auxctl_read(tp,
2260 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2261 if (!ret) {
2262 if (enable)
2263 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2264 else
2265 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2266 tg3_phy_auxctl_write(tp,
2267 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2268 }
2269 }
2270 }
2271
2272 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2273 {
2274 int ret;
2275 u32 val;
2276
2277 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2278 return;
2279
2280 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2281 if (!ret)
2282 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2283 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2284 }
2285
2286 static void tg3_phy_apply_otp(struct tg3 *tp)
2287 {
2288 u32 otp, phy;
2289
2290 if (!tp->phy_otp)
2291 return;
2292
2293 otp = tp->phy_otp;
2294
2295 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2296 return;
2297
2298 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2299 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2300 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2301
2302 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2303 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2304 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2305
2306 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2307 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2308 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2309
2310 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2311 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2312
2313 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2314 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2315
2316 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2317 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2318 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2319
2320 tg3_phy_toggle_auxctl_smdsp(tp, false);
2321 }
2322
2323 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2324 {
2325 u32 val;
2326
2327 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2328 return;
2329
2330 tp->setlpicnt = 0;
2331
2332 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2333 current_link_up &&
2334 tp->link_config.active_duplex == DUPLEX_FULL &&
2335 (tp->link_config.active_speed == SPEED_100 ||
2336 tp->link_config.active_speed == SPEED_1000)) {
2337 u32 eeectl;
2338
2339 if (tp->link_config.active_speed == SPEED_1000)
2340 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2341 else
2342 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2343
2344 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2345
2346 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2347 TG3_CL45_D7_EEERES_STAT, &val);
2348
2349 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2350 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2351 tp->setlpicnt = 2;
2352 }
2353
2354 if (!tp->setlpicnt) {
2355 if (current_link_up &&
2356 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2357 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2358 tg3_phy_toggle_auxctl_smdsp(tp, false);
2359 }
2360
2361 val = tr32(TG3_CPMU_EEE_MODE);
2362 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2363 }
2364 }
2365
2366 static void tg3_phy_eee_enable(struct tg3 *tp)
2367 {
2368 u32 val;
2369
2370 if (tp->link_config.active_speed == SPEED_1000 &&
2371 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2372 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2373 tg3_flag(tp, 57765_CLASS)) &&
2374 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2375 val = MII_TG3_DSP_TAP26_ALNOKO |
2376 MII_TG3_DSP_TAP26_RMRXSTO;
2377 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2378 tg3_phy_toggle_auxctl_smdsp(tp, false);
2379 }
2380
2381 val = tr32(TG3_CPMU_EEE_MODE);
2382 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2383 }
2384
2385 static int tg3_wait_macro_done(struct tg3 *tp)
2386 {
2387 int limit = 100;
2388
2389 while (limit--) {
2390 u32 tmp32;
2391
2392 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2393 if ((tmp32 & 0x1000) == 0)
2394 break;
2395 }
2396 }
2397 if (limit < 0)
2398 return -EBUSY;
2399
2400 return 0;
2401 }
2402
2403 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2404 {
2405 static const u32 test_pat[4][6] = {
2406 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2407 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2408 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2409 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2410 };
2411 int chan;
2412
2413 for (chan = 0; chan < 4; chan++) {
2414 int i;
2415
2416 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2417 (chan * 0x2000) | 0x0200);
2418 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2419
2420 for (i = 0; i < 6; i++)
2421 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2422 test_pat[chan][i]);
2423
2424 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2425 if (tg3_wait_macro_done(tp)) {
2426 *resetp = 1;
2427 return -EBUSY;
2428 }
2429
2430 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2431 (chan * 0x2000) | 0x0200);
2432 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2433 if (tg3_wait_macro_done(tp)) {
2434 *resetp = 1;
2435 return -EBUSY;
2436 }
2437
2438 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2439 if (tg3_wait_macro_done(tp)) {
2440 *resetp = 1;
2441 return -EBUSY;
2442 }
2443
2444 for (i = 0; i < 6; i += 2) {
2445 u32 low, high;
2446
2447 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2448 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2449 tg3_wait_macro_done(tp)) {
2450 *resetp = 1;
2451 return -EBUSY;
2452 }
2453 low &= 0x7fff;
2454 high &= 0x000f;
2455 if (low != test_pat[chan][i] ||
2456 high != test_pat[chan][i+1]) {
2457 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2458 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2459 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2460
2461 return -EBUSY;
2462 }
2463 }
2464 }
2465
2466 return 0;
2467 }
2468
2469 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2470 {
2471 int chan;
2472
2473 for (chan = 0; chan < 4; chan++) {
2474 int i;
2475
2476 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2477 (chan * 0x2000) | 0x0200);
2478 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2479 for (i = 0; i < 6; i++)
2480 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2481 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2482 if (tg3_wait_macro_done(tp))
2483 return -EBUSY;
2484 }
2485
2486 return 0;
2487 }
2488
2489 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2490 {
2491 u32 reg32, phy9_orig;
2492 int retries, do_phy_reset, err;
2493
2494 retries = 10;
2495 do_phy_reset = 1;
2496 do {
2497 if (do_phy_reset) {
2498 err = tg3_bmcr_reset(tp);
2499 if (err)
2500 return err;
2501 do_phy_reset = 0;
2502 }
2503
2504 /* Disable transmitter and interrupt. */
2505 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2506 continue;
2507
2508 reg32 |= 0x3000;
2509 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2510
2511 /* Set full-duplex, 1000 mbps. */
2512 tg3_writephy(tp, MII_BMCR,
2513 BMCR_FULLDPLX | BMCR_SPEED1000);
2514
2515 /* Set to master mode. */
2516 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2517 continue;
2518
2519 tg3_writephy(tp, MII_CTRL1000,
2520 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2521
2522 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2523 if (err)
2524 return err;
2525
2526 /* Block the PHY control access. */
2527 tg3_phydsp_write(tp, 0x8005, 0x0800);
2528
2529 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2530 if (!err)
2531 break;
2532 } while (--retries);
2533
2534 err = tg3_phy_reset_chanpat(tp);
2535 if (err)
2536 return err;
2537
2538 tg3_phydsp_write(tp, 0x8005, 0x0000);
2539
2540 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2541 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2542
2543 tg3_phy_toggle_auxctl_smdsp(tp, false);
2544
2545 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2546
2547 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2548 reg32 &= ~0x3000;
2549 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2550 } else if (!err)
2551 err = -EBUSY;
2552
2553 return err;
2554 }
2555
2556 static void tg3_carrier_off(struct tg3 *tp)
2557 {
2558 netif_carrier_off(tp->dev);
2559 tp->link_up = false;
2560 }
2561
2562 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2563 {
2564 if (tg3_flag(tp, ENABLE_ASF))
2565 netdev_warn(tp->dev,
2566 "Management side-band traffic will be interrupted during phy settings change\n");
2567 }
2568
2569 /* This will reset the tigon3 PHY unconditionally; the
2570 * per-revision PHY workarounds are reapplied afterwards.
2571 */
2572 static int tg3_phy_reset(struct tg3 *tp)
2573 {
2574 u32 val, cpmuctrl;
2575 int err;
2576
2577 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2578 val = tr32(GRC_MISC_CFG);
2579 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2580 udelay(40);
2581 }
2582 err = tg3_readphy(tp, MII_BMSR, &val);
2583 err |= tg3_readphy(tp, MII_BMSR, &val);
2584 if (err != 0)
2585 return -EBUSY;
2586
2587 if (netif_running(tp->dev) && tp->link_up) {
2588 netif_carrier_off(tp->dev);
2589 tg3_link_report(tp);
2590 }
2591
2592 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2593 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2594 tg3_asic_rev(tp) == ASIC_REV_5705) {
2595 err = tg3_phy_reset_5703_4_5(tp);
2596 if (err)
2597 return err;
2598 goto out;
2599 }
2600
2601 cpmuctrl = 0;
2602 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2603 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2604 cpmuctrl = tr32(TG3_CPMU_CTRL);
2605 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2606 tw32(TG3_CPMU_CTRL,
2607 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2608 }
2609
2610 err = tg3_bmcr_reset(tp);
2611 if (err)
2612 return err;
2613
2614 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2615 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2616 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2617
2618 tw32(TG3_CPMU_CTRL, cpmuctrl);
2619 }
2620
2621 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2622 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2623 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2624 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2625 CPMU_LSPD_1000MB_MACCLK_12_5) {
2626 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2627 udelay(40);
2628 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2629 }
2630 }
2631
2632 if (tg3_flag(tp, 5717_PLUS) &&
2633 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2634 return 0;
2635
2636 tg3_phy_apply_otp(tp);
2637
2638 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2639 tg3_phy_toggle_apd(tp, true);
2640 else
2641 tg3_phy_toggle_apd(tp, false);
2642
2643 out:
2644 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2645 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2646 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2647 tg3_phydsp_write(tp, 0x000a, 0x0323);
2648 tg3_phy_toggle_auxctl_smdsp(tp, false);
2649 }
2650
2651 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2652 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2653 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2654 }
2655
2656 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2657 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2658 tg3_phydsp_write(tp, 0x000a, 0x310b);
2659 tg3_phydsp_write(tp, 0x201f, 0x9506);
2660 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2661 tg3_phy_toggle_auxctl_smdsp(tp, false);
2662 }
2663 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2664 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2665 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2666 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2667 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2668 tg3_writephy(tp, MII_TG3_TEST1,
2669 MII_TG3_TEST1_TRIM_EN | 0x4);
2670 } else
2671 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2672
2673 tg3_phy_toggle_auxctl_smdsp(tp, false);
2674 }
2675 }
2676
2677 /* Set extended packet length bit (bit 14) on all chips
2678 * that support jumbo frames. */
2679 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2680 /* Cannot do read-modify-write on 5401 */
2681 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2682 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2683 /* Set bit 14 with read-modify-write to preserve other bits */
2684 err = tg3_phy_auxctl_read(tp,
2685 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2686 if (!err)
2687 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2688 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2689 }
2690
2691 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2692 * jumbo frames transmission.
2693 */
2694 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2695 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2696 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2697 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2698 }
2699
2700 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2701 /* adjust output voltage */
2702 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2703 }
2704
2705 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2706 tg3_phydsp_write(tp, 0xffb, 0x4000);
2707
2708 tg3_phy_toggle_automdix(tp, true);
2709 tg3_phy_set_wirespeed(tp);
2710 return 0;
2711 }
2712
2713 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2714 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2715 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2716 TG3_GPIO_MSG_NEED_VAUX)
2717 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2718 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2719 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2720 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2721 (TG3_GPIO_MSG_DRVR_PRES << 12))
2722
2723 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2724 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2725 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2726 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2727 (TG3_GPIO_MSG_NEED_VAUX << 12))
2728
2729 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2730 {
2731 u32 status, shift;
2732
2733 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2734 tg3_asic_rev(tp) == ASIC_REV_5719)
2735 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2736 else
2737 status = tr32(TG3_CPMU_DRV_STATUS);
2738
2739 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2740 status &= ~(TG3_GPIO_MSG_MASK << shift);
2741 status |= (newstat << shift);
2742
2743 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2744 tg3_asic_rev(tp) == ASIC_REV_5719)
2745 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2746 else
2747 tw32(TG3_CPMU_DRV_STATUS, status);
2748
2749 return status >> TG3_APE_GPIO_MSG_SHIFT;
2750 }
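/* Each PCI function owns a 4-bit field of the shared status word at
* bit offset TG3_APE_GPIO_MSG_SHIFT + 4 * pci_fn. For example, a call
* of tg3_set_function_status(tp, TG3_GPIO_MSG_NEED_VAUX) on function 2
* clears only that function's field and sets its NEED_VAUX bit,
* leaving the other three fields untouched. The return value packs
* all four fields so callers can test it against
* TG3_GPIO_MSG_ALL_DRVR_PRES_MASK or TG3_GPIO_MSG_ALL_NEED_VAUX_MASK,
* as tg3_frob_aux_power_5717() does below.
*/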
2751
2752 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2753 {
2754 if (!tg3_flag(tp, IS_NIC))
2755 return 0;
2756
2757 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2758 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2759 tg3_asic_rev(tp) == ASIC_REV_5720) {
2760 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2761 return -EIO;
2762
2763 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2764
2765 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2766 TG3_GRC_LCLCTL_PWRSW_DELAY);
2767
2768 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2769 } else {
2770 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2771 TG3_GRC_LCLCTL_PWRSW_DELAY);
2772 }
2773
2774 return 0;
2775 }
2776
2777 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2778 {
2779 u32 grc_local_ctrl;
2780
2781 if (!tg3_flag(tp, IS_NIC) ||
2782 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2783 tg3_asic_rev(tp) == ASIC_REV_5701)
2784 return;
2785
2786 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2787
2788 tw32_wait_f(GRC_LOCAL_CTRL,
2789 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2790 TG3_GRC_LCLCTL_PWRSW_DELAY);
2791
2792 tw32_wait_f(GRC_LOCAL_CTRL,
2793 grc_local_ctrl,
2794 TG3_GRC_LCLCTL_PWRSW_DELAY);
2795
2796 tw32_wait_f(GRC_LOCAL_CTRL,
2797 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2798 TG3_GRC_LCLCTL_PWRSW_DELAY);
2799 }
2800
2801 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2802 {
2803 if (!tg3_flag(tp, IS_NIC))
2804 return;
2805
2806 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2807 tg3_asic_rev(tp) == ASIC_REV_5701) {
2808 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2809 (GRC_LCLCTRL_GPIO_OE0 |
2810 GRC_LCLCTRL_GPIO_OE1 |
2811 GRC_LCLCTRL_GPIO_OE2 |
2812 GRC_LCLCTRL_GPIO_OUTPUT0 |
2813 GRC_LCLCTRL_GPIO_OUTPUT1),
2814 TG3_GRC_LCLCTL_PWRSW_DELAY);
2815 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2816 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2817 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2818 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2819 GRC_LCLCTRL_GPIO_OE1 |
2820 GRC_LCLCTRL_GPIO_OE2 |
2821 GRC_LCLCTRL_GPIO_OUTPUT0 |
2822 GRC_LCLCTRL_GPIO_OUTPUT1 |
2823 tp->grc_local_ctrl;
2824 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2825 TG3_GRC_LCLCTL_PWRSW_DELAY);
2826
2827 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2828 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2829 TG3_GRC_LCLCTL_PWRSW_DELAY);
2830
2831 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2832 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2833 TG3_GRC_LCLCTL_PWRSW_DELAY);
2834 } else {
2835 u32 no_gpio2;
2836 u32 grc_local_ctrl = 0;
2837
2838 /* Workaround to avoid drawing too much current. */
2839 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2840 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2841 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2842 grc_local_ctrl,
2843 TG3_GRC_LCLCTL_PWRSW_DELAY);
2844 }
2845
2846 /* On 5753 and variants, GPIO2 cannot be used. */
2847 no_gpio2 = tp->nic_sram_data_cfg &
2848 NIC_SRAM_DATA_CFG_NO_GPIO2;
2849
2850 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2851 GRC_LCLCTRL_GPIO_OE1 |
2852 GRC_LCLCTRL_GPIO_OE2 |
2853 GRC_LCLCTRL_GPIO_OUTPUT1 |
2854 GRC_LCLCTRL_GPIO_OUTPUT2;
2855 if (no_gpio2) {
2856 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2857 GRC_LCLCTRL_GPIO_OUTPUT2);
2858 }
2859 tw32_wait_f(GRC_LOCAL_CTRL,
2860 tp->grc_local_ctrl | grc_local_ctrl,
2861 TG3_GRC_LCLCTL_PWRSW_DELAY);
2862
2863 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2864
2865 tw32_wait_f(GRC_LOCAL_CTRL,
2866 tp->grc_local_ctrl | grc_local_ctrl,
2867 TG3_GRC_LCLCTL_PWRSW_DELAY);
2868
2869 if (!no_gpio2) {
2870 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2871 tw32_wait_f(GRC_LOCAL_CTRL,
2872 tp->grc_local_ctrl | grc_local_ctrl,
2873 TG3_GRC_LCLCTL_PWRSW_DELAY);
2874 }
2875 }
2876 }
2877
2878 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2879 {
2880 u32 msg = 0;
2881
2882 /* Serialize power state transitions */
2883 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2884 return;
2885
2886 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2887 msg = TG3_GPIO_MSG_NEED_VAUX;
2888
2889 msg = tg3_set_function_status(tp, msg);
2890
2891 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2892 goto done;
2893
2894 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2895 tg3_pwrsrc_switch_to_vaux(tp);
2896 else
2897 tg3_pwrsrc_die_with_vmain(tp);
2898
2899 done:
2900 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2901 }
2902
2903 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2904 {
2905 bool need_vaux = false;
2906
2907 /* The GPIOs do something completely different on 57765. */
2908 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2909 return;
2910
2911 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2912 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2913 tg3_asic_rev(tp) == ASIC_REV_5720) {
2914 tg3_frob_aux_power_5717(tp, include_wol ?
2915 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2916 return;
2917 }
2918
2919 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2920 struct net_device *dev_peer;
2921
2922 dev_peer = pci_get_drvdata(tp->pdev_peer);
2923
2924 /* remove_one() may have been run on the peer. */
2925 if (dev_peer) {
2926 struct tg3 *tp_peer = netdev_priv(dev_peer);
2927
2928 if (tg3_flag(tp_peer, INIT_COMPLETE))
2929 return;
2930
2931 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2932 tg3_flag(tp_peer, ENABLE_ASF))
2933 need_vaux = true;
2934 }
2935 }
2936
2937 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2938 tg3_flag(tp, ENABLE_ASF))
2939 need_vaux = true;
2940
2941 if (need_vaux)
2942 tg3_pwrsrc_switch_to_vaux(tp);
2943 else
2944 tg3_pwrsrc_die_with_vmain(tp);
2945 }
2946
2947 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2948 {
2949 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2950 return 1;
2951 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2952 if (speed != SPEED_10)
2953 return 1;
2954 } else if (speed == SPEED_10)
2955 return 1;
2956
2957 return 0;
2958 }
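/* Summary of the logic above: with LED_CTRL_MODE_PHY_2 the link
* polarity bit is always set; with a BCM5411 PHY it is set at
* 100/1000 Mbps but not at 10 Mbps; with any other PHY the sense is
* inverted and it is set only at 10 Mbps.
*/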
2959
2960 static bool tg3_phy_power_bug(struct tg3 *tp)
2961 {
2962 switch (tg3_asic_rev(tp)) {
2963 case ASIC_REV_5700:
2964 case ASIC_REV_5704:
2965 return true;
2966 case ASIC_REV_5780:
2967 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2968 return true;
2969 return false;
2970 case ASIC_REV_5717:
2971 if (!tp->pci_fn)
2972 return true;
2973 return false;
2974 case ASIC_REV_5719:
2975 case ASIC_REV_5720:
2976 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
2977 !tp->pci_fn)
2978 return true;
2979 return false;
2980 }
2981
2982 return false;
2983 }
2984
2985 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2986 {
2987 u32 val;
2988
2989 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
2990 return;
2991
2992 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2993 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
2994 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2995 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2996
2997 sg_dig_ctrl |=
2998 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2999 tw32(SG_DIG_CTRL, sg_dig_ctrl);
3000 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3001 }
3002 return;
3003 }
3004
3005 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3006 tg3_bmcr_reset(tp);
3007 val = tr32(GRC_MISC_CFG);
3008 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3009 udelay(40);
3010 return;
3011 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3012 u32 phytest;
3013 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3014 u32 phy;
3015
3016 tg3_writephy(tp, MII_ADVERTISE, 0);
3017 tg3_writephy(tp, MII_BMCR,
3018 BMCR_ANENABLE | BMCR_ANRESTART);
3019
3020 tg3_writephy(tp, MII_TG3_FET_TEST,
3021 phytest | MII_TG3_FET_SHADOW_EN);
3022 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3023 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3024 tg3_writephy(tp,
3025 MII_TG3_FET_SHDW_AUXMODE4,
3026 phy);
3027 }
3028 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3029 }
3030 return;
3031 } else if (do_low_power) {
3032 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3033 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3034
3035 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3036 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3037 MII_TG3_AUXCTL_PCTL_VREG_11V;
3038 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3039 }
3040
3041 /* On some chips the PHY must be left powered up, since powering
3042 * it down trips hardware bugs (see tg3_phy_power_bug() above).
3043 */
3044 if (tg3_phy_power_bug(tp))
3045 return;
3046
3047 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3048 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3049 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3050 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3051 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3052 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3053 }
3054
3055 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3056 }
3057
3058 /* tp->lock is held. */
3059 static int tg3_nvram_lock(struct tg3 *tp)
3060 {
3061 if (tg3_flag(tp, NVRAM)) {
3062 int i;
3063
3064 if (tp->nvram_lock_cnt == 0) {
3065 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3066 for (i = 0; i < 8000; i++) {
3067 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3068 break;
3069 udelay(20);
3070 }
3071 if (i == 8000) {
3072 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3073 return -ENODEV;
3074 }
3075 }
3076 tp->nvram_lock_cnt++;
3077 }
3078 return 0;
3079 }
3080
3081 /* tp->lock is held. */
3082 static void tg3_nvram_unlock(struct tg3 *tp)
3083 {
3084 if (tg3_flag(tp, NVRAM)) {
3085 if (tp->nvram_lock_cnt > 0)
3086 tp->nvram_lock_cnt--;
3087 if (tp->nvram_lock_cnt == 0)
3088 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3089 }
3090 }
3091
3092 /* tp->lock is held. */
3093 static void tg3_enable_nvram_access(struct tg3 *tp)
3094 {
3095 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3096 u32 nvaccess = tr32(NVRAM_ACCESS);
3097
3098 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3099 }
3100 }
3101
3102 /* tp->lock is held. */
3103 static void tg3_disable_nvram_access(struct tg3 *tp)
3104 {
3105 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3106 u32 nvaccess = tr32(NVRAM_ACCESS);
3107
3108 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3109 }
3110 }
3111
3112 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3113 u32 offset, u32 *val)
3114 {
3115 u32 tmp;
3116 int i;
3117
3118 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3119 return -EINVAL;
3120
3121 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3122 EEPROM_ADDR_DEVID_MASK |
3123 EEPROM_ADDR_READ);
3124 tw32(GRC_EEPROM_ADDR,
3125 tmp |
3126 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3127 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3128 EEPROM_ADDR_ADDR_MASK) |
3129 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3130
3131 for (i = 0; i < 1000; i++) {
3132 tmp = tr32(GRC_EEPROM_ADDR);
3133
3134 if (tmp & EEPROM_ADDR_COMPLETE)
3135 break;
3136 msleep(1);
3137 }
3138 if (!(tmp & EEPROM_ADDR_COMPLETE))
3139 return -EBUSY;
3140
3141 tmp = tr32(GRC_EEPROM_DATA);
3142
3143 /*
3144 * The data will always be opposite the native endian
3145 * format. Perform a blind byteswap to compensate.
3146 */
3147 *val = swab32(tmp);
3148
3149 return 0;
3150 }
3151
3152 #define NVRAM_CMD_TIMEOUT 10000
3153
3154 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3155 {
3156 int i;
3157
3158 tw32(NVRAM_CMD, nvram_cmd);
3159 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3160 udelay(10);
3161 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3162 udelay(10);
3163 break;
3164 }
3165 }
3166
3167 if (i == NVRAM_CMD_TIMEOUT)
3168 return -EBUSY;
3169
3170 return 0;
3171 }
3172
3173 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3174 {
3175 if (tg3_flag(tp, NVRAM) &&
3176 tg3_flag(tp, NVRAM_BUFFERED) &&
3177 tg3_flag(tp, FLASH) &&
3178 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3179 (tp->nvram_jedecnum == JEDEC_ATMEL))
3180
3181 addr = ((addr / tp->nvram_pagesize) <<
3182 ATMEL_AT45DB0X1B_PAGE_POS) +
3183 (addr % tp->nvram_pagesize);
3184
3185 return addr;
3186 }
3187
3188 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3189 {
3190 if (tg3_flag(tp, NVRAM) &&
3191 tg3_flag(tp, NVRAM_BUFFERED) &&
3192 tg3_flag(tp, FLASH) &&
3193 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3194 (tp->nvram_jedecnum == JEDEC_ATMEL))
3195
3196 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3197 tp->nvram_pagesize) +
3198 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3199
3200 return addr;
3201 }
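/* Worked example, assuming the usual AT45DB0x1B geometry of 264-byte
* pages with ATMEL_AT45DB0X1B_PAGE_POS == 9: linear address 538 lies
* in page 538 / 264 = 2 at offset 538 % 264 = 10, so
* tg3_nvram_phys_addr() yields (2 << 9) + 10 = 0x40a.
* tg3_nvram_logical_addr() inverts this: 0x40a >> 9 = 2 and
* 0x40a & 0x1ff = 10 map back to 2 * 264 + 10 = 538.
*/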
3202
3203 /* NOTE: Data read in from NVRAM is byteswapped according to
3204 * the byteswapping settings for all other register accesses.
3205 * tg3 devices are BE devices, so on a BE machine, the data
3206 * returned will be exactly as it is seen in NVRAM. On a LE
3207 * machine, the 32-bit value will be byteswapped.
3208 */
3209 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3210 {
3211 int ret;
3212
3213 if (!tg3_flag(tp, NVRAM))
3214 return tg3_nvram_read_using_eeprom(tp, offset, val);
3215
3216 offset = tg3_nvram_phys_addr(tp, offset);
3217
3218 if (offset > NVRAM_ADDR_MSK)
3219 return -EINVAL;
3220
3221 ret = tg3_nvram_lock(tp);
3222 if (ret)
3223 return ret;
3224
3225 tg3_enable_nvram_access(tp);
3226
3227 tw32(NVRAM_ADDR, offset);
3228 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3229 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3230
3231 if (ret == 0)
3232 *val = tr32(NVRAM_RDDATA);
3233
3234 tg3_disable_nvram_access(tp);
3235
3236 tg3_nvram_unlock(tp);
3237
3238 return ret;
3239 }
3240
3241 /* Ensures NVRAM data is in bytestream format. */
3242 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3243 {
3244 u32 v;
3245 int res = tg3_nvram_read(tp, offset, &v);
3246 if (!res)
3247 *val = cpu_to_be32(v);
3248 return res;
3249 }
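/* Illustration of the two notes above: for the NVRAM byte stream
* 12 34 56 78, tg3_nvram_read() hands back val == 0x12345678 as a
* number on either host, which stored in memory on a little-endian
* machine is the byte sequence 78 56 34 12. The cpu_to_be32() here
* restores bytestream order, so *val holds the bytes 12 34 56 78 in
* memory on both BE and LE hosts.
*/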
3250
3251 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3252 u32 offset, u32 len, u8 *buf)
3253 {
3254 int i, j, rc = 0;
3255 u32 val;
3256
3257 for (i = 0; i < len; i += 4) {
3258 u32 addr;
3259 __be32 data;
3260
3261 addr = offset + i;
3262
3263 memcpy(&data, buf + i, 4);
3264
3265 /*
3266 * The SEEPROM interface expects the data to always be opposite
3267 * the native endian format. We accomplish this by reversing
3268 * all the operations that would have been performed on the
3269 * data from a call to tg3_nvram_read_be32().
3270 */
3271 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3272
3273 val = tr32(GRC_EEPROM_ADDR);
3274 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3275
3276 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3277 EEPROM_ADDR_READ);
3278 tw32(GRC_EEPROM_ADDR, val |
3279 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3280 (addr & EEPROM_ADDR_ADDR_MASK) |
3281 EEPROM_ADDR_START |
3282 EEPROM_ADDR_WRITE);
3283
3284 for (j = 0; j < 1000; j++) {
3285 val = tr32(GRC_EEPROM_ADDR);
3286
3287 if (val & EEPROM_ADDR_COMPLETE)
3288 break;
3289 msleep(1);
3290 }
3291 if (!(val & EEPROM_ADDR_COMPLETE)) {
3292 rc = -EBUSY;
3293 break;
3294 }
3295 }
3296
3297 return rc;
3298 }
3299
3300 /* offset and length are dword aligned */
3301 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3302 u8 *buf)
3303 {
3304 int ret = 0;
3305 u32 pagesize = tp->nvram_pagesize;
3306 u32 pagemask = pagesize - 1;
3307 u32 nvram_cmd;
3308 u8 *tmp;
3309
3310 tmp = kmalloc(pagesize, GFP_KERNEL);
3311 if (tmp == NULL)
3312 return -ENOMEM;
3313
3314 while (len) {
3315 int j;
3316 u32 phy_addr, page_off, size;
3317
3318 phy_addr = offset & ~pagemask;
3319
3320 for (j = 0; j < pagesize; j += 4) {
3321 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3322 (__be32 *) (tmp + j));
3323 if (ret)
3324 break;
3325 }
3326 if (ret)
3327 break;
3328
3329 page_off = offset & pagemask;
3330 size = pagesize - page_off; /* at most the rest of this page */
3331 if (len < size)
3332 size = len;
3333
3334 len -= size;
3335 memcpy(tmp + page_off, buf, size);
3336 buf += size; /* advance the source for the next page */
3337
3338 offset = offset + (pagesize - page_off);
3339
3340 tg3_enable_nvram_access(tp);
3341
3342 /*
3343 * Before we can erase the flash page, we need
3344 * to issue a special "write enable" command.
3345 */
3346 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3347
3348 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3349 break;
3350
3351 /* Erase the target page */
3352 tw32(NVRAM_ADDR, phy_addr);
3353
3354 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3355 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3356
3357 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3358 break;
3359
3360 /* Issue another write enable to start the write. */
3361 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3362
3363 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3364 break;
3365
3366 for (j = 0; j < pagesize; j += 4) {
3367 __be32 data;
3368
3369 data = *((__be32 *) (tmp + j));
3370
3371 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3372
3373 tw32(NVRAM_ADDR, phy_addr + j);
3374
3375 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3376 NVRAM_CMD_WR;
3377
3378 if (j == 0)
3379 nvram_cmd |= NVRAM_CMD_FIRST;
3380 else if (j == (pagesize - 4))
3381 nvram_cmd |= NVRAM_CMD_LAST;
3382
3383 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3384 if (ret)
3385 break;
3386 }
3387 if (ret)
3388 break;
3389 }
3390
3391 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3392 tg3_nvram_exec_cmd(tp, nvram_cmd);
3393
3394 kfree(tmp);
3395
3396 return ret;
3397 }
3398
3399 /* offset and length are dword aligned */
3400 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3401 u8 *buf)
3402 {
3403 int i, ret = 0;
3404
3405 for (i = 0; i < len; i += 4, offset += 4) {
3406 u32 page_off, phy_addr, nvram_cmd;
3407 __be32 data;
3408
3409 memcpy(&data, buf + i, 4);
3410 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3411
3412 page_off = offset % tp->nvram_pagesize;
3413
3414 phy_addr = tg3_nvram_phys_addr(tp, offset);
3415
3416 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3417
3418 if (page_off == 0 || i == 0)
3419 nvram_cmd |= NVRAM_CMD_FIRST;
3420 if (page_off == (tp->nvram_pagesize - 4))
3421 nvram_cmd |= NVRAM_CMD_LAST;
3422
3423 if (i == (len - 4))
3424 nvram_cmd |= NVRAM_CMD_LAST;
3425
3426 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3427 !tg3_flag(tp, FLASH) ||
3428 !tg3_flag(tp, 57765_PLUS))
3429 tw32(NVRAM_ADDR, phy_addr);
3430
3431 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3432 !tg3_flag(tp, 5755_PLUS) &&
3433 (tp->nvram_jedecnum == JEDEC_ST) &&
3434 (nvram_cmd & NVRAM_CMD_FIRST)) {
3435 u32 cmd;
3436
3437 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3438 ret = tg3_nvram_exec_cmd(tp, cmd);
3439 if (ret)
3440 break;
3441 }
3442 if (!tg3_flag(tp, FLASH)) {
3443 /* We always do complete word writes to eeprom. */
3444 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3445 }
3446
3447 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3448 if (ret)
3449 break;
3450 }
3451 return ret;
3452 }
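/* Example of the FIRST/LAST sequencing above, for a part with
* 264-byte pages: writing 16 bytes at offset 256 programs words at
* offsets 256, 260, 264 and 268. The word at 256 gets NVRAM_CMD_FIRST
* (i == 0), 260 gets NVRAM_CMD_LAST (last word of its page), 264
* starts a new page and gets FIRST again, and 268, the final word of
* the request, gets LAST.
*/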
3453
3454 /* offset and length are dword aligned */
3455 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3456 {
3457 int ret;
3458
3459 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3460 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3461 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3462 udelay(40);
3463 }
3464
3465 if (!tg3_flag(tp, NVRAM)) {
3466 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3467 } else {
3468 u32 grc_mode;
3469
3470 ret = tg3_nvram_lock(tp);
3471 if (ret)
3472 return ret;
3473
3474 tg3_enable_nvram_access(tp);
3475 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3476 tw32(NVRAM_WRITE1, 0x406);
3477
3478 grc_mode = tr32(GRC_MODE);
3479 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3480
3481 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3482 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3483 buf);
3484 } else {
3485 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3486 buf);
3487 }
3488
3489 grc_mode = tr32(GRC_MODE);
3490 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3491
3492 tg3_disable_nvram_access(tp);
3493 tg3_nvram_unlock(tp);
3494 }
3495
3496 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3497 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3498 udelay(40);
3499 }
3500
3501 return ret;
3502 }
3503
3504 #define RX_CPU_SCRATCH_BASE 0x30000
3505 #define RX_CPU_SCRATCH_SIZE 0x04000
3506 #define TX_CPU_SCRATCH_BASE 0x34000
3507 #define TX_CPU_SCRATCH_SIZE 0x04000
3508
3509 /* tp->lock is held. */
3510 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3511 {
3512 int i;
3513 const int iters = 10000;
3514
3515 for (i = 0; i < iters; i++) {
3516 tw32(cpu_base + CPU_STATE, 0xffffffff);
3517 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3518 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3519 break;
3520 }
3521
3522 return (i == iters) ? -EBUSY : 0;
3523 }
3524
3525 /* tp->lock is held. */
3526 static int tg3_rxcpu_pause(struct tg3 *tp)
3527 {
3528 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3529
3530 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3531 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3532 udelay(10);
3533
3534 return rc;
3535 }
3536
3537 /* tp->lock is held. */
3538 static int tg3_txcpu_pause(struct tg3 *tp)
3539 {
3540 return tg3_pause_cpu(tp, TX_CPU_BASE);
3541 }
3542
3543 /* tp->lock is held. */
3544 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3545 {
3546 tw32(cpu_base + CPU_STATE, 0xffffffff);
3547 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3548 }
3549
3550 /* tp->lock is held. */
3551 static void tg3_rxcpu_resume(struct tg3 *tp)
3552 {
3553 tg3_resume_cpu(tp, RX_CPU_BASE);
3554 }
3555
3556 /* tp->lock is held. */
3557 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3558 {
3559 int rc;
3560
3561 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3562
3563 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3564 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3565
3566 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3567 return 0;
3568 }
3569 if (cpu_base == RX_CPU_BASE) {
3570 rc = tg3_rxcpu_pause(tp);
3571 } else {
3572 /*
3573 * There is only an Rx CPU for the 5750 derivative in the
3574 * BCM4785.
3575 */
3576 if (tg3_flag(tp, IS_SSB_CORE))
3577 return 0;
3578
3579 rc = tg3_txcpu_pause(tp);
3580 }
3581
3582 if (rc) {
3583 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3584 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3585 return -ENODEV;
3586 }
3587
3588 /* Clear firmware's nvram arbitration. */
3589 if (tg3_flag(tp, NVRAM))
3590 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3591 return 0;
3592 }
3593
3594 static int tg3_fw_data_len(struct tg3 *tp,
3595 const struct tg3_firmware_hdr *fw_hdr)
3596 {
3597 int fw_len;
3598
3599 /* Non-fragmented firmware has a single firmware header followed by a
3600 * contiguous chunk of data to be written. The length field in that
3601 * header is not the length of the data to be written but the complete
3602 * length of the bss. The data length is instead derived from
3603 * tp->fw->size minus the header length.
3604 *
3605 * Fragmented firmware has a main header followed by multiple
3606 * fragments. Each fragment is identical to a non-fragmented image,
3607 * with a firmware header followed by a contiguous chunk of data. In
3608 * the main header, the length field is unused and set to 0xffffffff.
3609 * In each fragment header the length is the entire size of that
3610 * fragment, i.e. fragment data plus header length. The data length is
3611 * therefore the length field in the header minus TG3_FW_HDR_LEN.
3612 */
3613 if (tp->fw_len == 0xffffffff)
3614 fw_len = be32_to_cpu(fw_hdr->len);
3615 else
3616 fw_len = tp->fw->size;
3617
3618 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3619 }
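/* Worked example, taking TG3_FW_HDR_LEN to be the three u32 header
* fields (version, base_addr, len): a fragment carrying 64 data words
* has a header len of 64 * 4 + TG3_FW_HDR_LEN, and tg3_fw_data_len()
* returns 64 for it. A non-fragmented image with the same 64 words
* gives tp->fw->size == 64 * 4 + TG3_FW_HDR_LEN and the same result,
* while its header len field covers the whole bss instead.
*/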
3620
3621 /* tp->lock is held. */
3622 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3623 u32 cpu_scratch_base, int cpu_scratch_size,
3624 const struct tg3_firmware_hdr *fw_hdr)
3625 {
3626 int err, i;
3627 void (*write_op)(struct tg3 *, u32, u32);
3628 int total_len = tp->fw->size;
3629
3630 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3631 netdev_err(tp->dev,
3632 "%s: Trying to load TX cpu firmware which is 5705\n",
3633 __func__);
3634 return -EINVAL;
3635 }
3636
3637 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3638 write_op = tg3_write_mem;
3639 else
3640 write_op = tg3_write_indirect_reg32;
3641
3642 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3643 /* It is possible that bootcode is still loading at this point.
3644 * Get the nvram lock first before halting the cpu.
3645 */
3646 int lock_err = tg3_nvram_lock(tp);
3647 err = tg3_halt_cpu(tp, cpu_base);
3648 if (!lock_err)
3649 tg3_nvram_unlock(tp);
3650 if (err)
3651 goto out;
3652
3653 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3654 write_op(tp, cpu_scratch_base + i, 0);
3655 tw32(cpu_base + CPU_STATE, 0xffffffff);
3656 tw32(cpu_base + CPU_MODE,
3657 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3658 } else {
3659 /* Subtract the additional main header for fragmented firmware and
3660 * advance to the first fragment.
3661 */
3662 total_len -= TG3_FW_HDR_LEN;
3663 fw_hdr++;
3664 }
3665
3666 do {
3667 u32 *fw_data = (u32 *)(fw_hdr + 1);
3668 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3669 write_op(tp, cpu_scratch_base +
3670 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3671 (i * sizeof(u32)),
3672 be32_to_cpu(fw_data[i]));
3673
3674 total_len -= be32_to_cpu(fw_hdr->len);
3675
3676 /* Advance to next fragment */
3677 fw_hdr = (struct tg3_firmware_hdr *)
3678 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3679 } while (total_len > 0);
3680
3681 err = 0;
3682
3683 out:
3684 return err;
3685 }
3686
3687 /* tp->lock is held. */
3688 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3689 {
3690 int i;
3691 const int iters = 5;
3692
3693 tw32(cpu_base + CPU_STATE, 0xffffffff);
3694 tw32_f(cpu_base + CPU_PC, pc);
3695
3696 for (i = 0; i < iters; i++) {
3697 if (tr32(cpu_base + CPU_PC) == pc)
3698 break;
3699 tw32(cpu_base + CPU_STATE, 0xffffffff);
3700 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3701 tw32_f(cpu_base + CPU_PC, pc);
3702 udelay(1000);
3703 }
3704
3705 return (i == iters) ? -EBUSY : 0;
3706 }
3707
3708 /* tp->lock is held. */
3709 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3710 {
3711 const struct tg3_firmware_hdr *fw_hdr;
3712 int err;
3713
3714 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3715
3716 /* The firmware blob starts with version numbers, followed by
3717 * start address and length. The length field holds the complete
3718 * length: end_address_of_bss - start_address_of_text. The
3719 * remainder is the blob to be loaded contiguously from the start
3720 * address. */
3721
3722 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3723 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3724 fw_hdr);
3725 if (err)
3726 return err;
3727
3728 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3729 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3730 fw_hdr);
3731 if (err)
3732 return err;
3733
3734 /* Now startup only the RX cpu. */
3735 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3736 be32_to_cpu(fw_hdr->base_addr));
3737 if (err) {
3738 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3739 "should be %08x\n", __func__,
3740 tr32(RX_CPU_BASE + CPU_PC),
3741 be32_to_cpu(fw_hdr->base_addr));
3742 return -ENODEV;
3743 }
3744
3745 tg3_rxcpu_resume(tp);
3746
3747 return 0;
3748 }
3749
3750 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3751 {
3752 const int iters = 1000;
3753 int i;
3754 u32 val;
3755
3756 /* Wait for the boot code to complete initialization and enter the
3757 * service loop. It is then safe to download service patches.
3758 */
3759 for (i = 0; i < iters; i++) {
3760 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3761 break;
3762
3763 udelay(10);
3764 }
3765
3766 if (i == iters) {
3767 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3768 return -EBUSY;
3769 }
3770
3771 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3772 if (val & 0xff) {
3773 netdev_warn(tp->dev,
3774 "Other patches exist. Not downloading EEE patch\n");
3775 return -EEXIST;
3776 }
3777
3778 return 0;
3779 }
3780
3781 /* tp->lock is held. */
3782 static void tg3_load_57766_firmware(struct tg3 *tp)
3783 {
3784 struct tg3_firmware_hdr *fw_hdr;
3785
3786 if (!tg3_flag(tp, NO_NVRAM))
3787 return;
3788
3789 if (tg3_validate_rxcpu_state(tp))
3790 return;
3791
3792 if (!tp->fw)
3793 return;
3794
3795 /* This firmware blob has a different format from older firmware
3796 * releases, as described below. The main difference is that the
3797 * data is fragmented and written to non-contiguous locations.
3798 *
3799 * At the beginning there is a firmware header identical to other
3800 * firmware, consisting of version, base addr and length. The length
3801 * here is unused and set to 0xffffffff.
3802 *
3803 * This is followed by a series of firmware fragments, each
3804 * individually identical to older firmware, i.e. a firmware header
3805 * followed by the data for that fragment. The version field of each
3806 * fragment header is unused.
3807 */
3808
3809 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3810 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3811 return;
3812
3813 if (tg3_rxcpu_pause(tp))
3814 return;
3815
3816 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3817 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3818
3819 tg3_rxcpu_resume(tp);
3820 }
3821
3822 /* tp->lock is held. */
3823 static int tg3_load_tso_firmware(struct tg3 *tp)
3824 {
3825 const struct tg3_firmware_hdr *fw_hdr;
3826 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3827 int err;
3828
3829 if (!tg3_flag(tp, FW_TSO))
3830 return 0;
3831
3832 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3833
3834 /* The firmware blob starts with version numbers, followed by
3835 * start address and length. The length field holds the complete
3836 * length: end_address_of_bss - start_address_of_text. The
3837 * remainder is the blob to be loaded contiguously from the start
3838 * address. */
3839
3840 cpu_scratch_size = tp->fw_len;
3841
3842 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3843 cpu_base = RX_CPU_BASE;
3844 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3845 } else {
3846 cpu_base = TX_CPU_BASE;
3847 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3848 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3849 }
3850
3851 err = tg3_load_firmware_cpu(tp, cpu_base,
3852 cpu_scratch_base, cpu_scratch_size,
3853 fw_hdr);
3854 if (err)
3855 return err;
3856
3857 /* Now startup the cpu. */
3858 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3859 be32_to_cpu(fw_hdr->base_addr));
3860 if (err) {
3861 netdev_err(tp->dev,
3862 "%s fails to set CPU PC, is %08x should be %08x\n",
3863 __func__, tr32(cpu_base + CPU_PC),
3864 be32_to_cpu(fw_hdr->base_addr));
3865 return -ENODEV;
3866 }
3867
3868 tg3_resume_cpu(tp, cpu_base);
3869 return 0;
3870 }
3871
3873 /* tp->lock is held. */
3874 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3875 {
3876 u32 addr_high, addr_low;
3877 int i;
3878
3879 addr_high = ((tp->dev->dev_addr[0] << 8) |
3880 tp->dev->dev_addr[1]);
3881 addr_low = ((tp->dev->dev_addr[2] << 24) |
3882 (tp->dev->dev_addr[3] << 16) |
3883 (tp->dev->dev_addr[4] << 8) |
3884 (tp->dev->dev_addr[5] << 0));
3885 for (i = 0; i < 4; i++) {
3886 if (i == 1 && skip_mac_1)
3887 continue;
3888 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3889 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3890 }
3891
3892 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3893 tg3_asic_rev(tp) == ASIC_REV_5704) {
3894 for (i = 0; i < 12; i++) {
3895 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3896 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3897 }
3898 }
3899
3900 addr_high = (tp->dev->dev_addr[0] +
3901 tp->dev->dev_addr[1] +
3902 tp->dev->dev_addr[2] +
3903 tp->dev->dev_addr[3] +
3904 tp->dev->dev_addr[4] +
3905 tp->dev->dev_addr[5]) &
3906 TX_BACKOFF_SEED_MASK;
3907 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3908 }
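/* Packing example: for dev_addr 00:10:18:aa:bb:cc (a Broadcom OUI),
* the code above writes addr_high == 0x00000010 and addr_low ==
* 0x18aabbcc, and seeds the backoff generator with the byte sum
* 0x00 + 0x10 + 0x18 + 0xaa + 0xbb + 0xcc = 0x259, masked with
* TX_BACKOFF_SEED_MASK.
*/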
3909
3910 static void tg3_enable_register_access(struct tg3 *tp)
3911 {
3912 /*
3913 * Make sure register accesses (indirect or otherwise) will function
3914 * correctly.
3915 */
3916 pci_write_config_dword(tp->pdev,
3917 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3918 }
3919
3920 static int tg3_power_up(struct tg3 *tp)
3921 {
3922 int err;
3923
3924 tg3_enable_register_access(tp);
3925
3926 err = pci_set_power_state(tp->pdev, PCI_D0);
3927 if (!err) {
3928 /* Switch out of Vaux if it is a NIC */
3929 tg3_pwrsrc_switch_to_vmain(tp);
3930 } else {
3931 netdev_err(tp->dev, "Transition to D0 failed\n");
3932 }
3933
3934 return err;
3935 }
3936
3937 static int tg3_setup_phy(struct tg3 *, bool);
3938
3939 static int tg3_power_down_prepare(struct tg3 *tp)
3940 {
3941 u32 misc_host_ctrl;
3942 bool device_should_wake, do_low_power;
3943
3944 tg3_enable_register_access(tp);
3945
3946 /* Restore the CLKREQ setting. */
3947 if (tg3_flag(tp, CLKREQ_BUG))
3948 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3949 PCI_EXP_LNKCTL_CLKREQ_EN);
3950
3951 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3952 tw32(TG3PCI_MISC_HOST_CTRL,
3953 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3954
3955 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3956 tg3_flag(tp, WOL_ENABLE);
3957
3958 if (tg3_flag(tp, USE_PHYLIB)) {
3959 do_low_power = false;
3960 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3961 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3962 struct phy_device *phydev;
3963 u32 phyid, advertising;
3964
3965 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3966
3967 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3968
3969 tp->link_config.speed = phydev->speed;
3970 tp->link_config.duplex = phydev->duplex;
3971 tp->link_config.autoneg = phydev->autoneg;
3972 tp->link_config.advertising = phydev->advertising;
3973
3974 advertising = ADVERTISED_TP |
3975 ADVERTISED_Pause |
3976 ADVERTISED_Autoneg |
3977 ADVERTISED_10baseT_Half;
3978
3979 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3980 if (tg3_flag(tp, WOL_SPEED_100MB))
3981 advertising |=
3982 ADVERTISED_100baseT_Half |
3983 ADVERTISED_100baseT_Full |
3984 ADVERTISED_10baseT_Full;
3985 else
3986 advertising |= ADVERTISED_10baseT_Full;
3987 }
3988
3989 phydev->advertising = advertising;
3990
3991 phy_start_aneg(phydev);
3992
3993 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3994 if (phyid != PHY_ID_BCMAC131) {
3995 phyid &= PHY_BCM_OUI_MASK;
3996 if (phyid == PHY_BCM_OUI_1 ||
3997 phyid == PHY_BCM_OUI_2 ||
3998 phyid == PHY_BCM_OUI_3)
3999 do_low_power = true;
4000 }
4001 }
4002 } else {
4003 do_low_power = true;
4004
4005 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4006 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4007
4008 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4009 tg3_setup_phy(tp, false);
4010 }
4011
4012 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4013 u32 val;
4014
4015 val = tr32(GRC_VCPU_EXT_CTRL);
4016 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4017 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4018 int i;
4019 u32 val;
4020
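/* Poll (up to 200 iterations, ~1 ms apart) for the on-chip
 * firmware to post ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1 in the ASF
 * status mailbox.
 */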
4021 for (i = 0; i < 200; i++) {
4022 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4023 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4024 break;
4025 msleep(1);
4026 }
4027 }
4028 if (tg3_flag(tp, WOL_CAP))
4029 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4030 WOL_DRV_STATE_SHUTDOWN |
4031 WOL_DRV_WOL |
4032 WOL_SET_MAGIC_PKT);
4033
4034 if (device_should_wake) {
4035 u32 mac_mode;
4036
4037 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4038 if (do_low_power &&
4039 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4040 tg3_phy_auxctl_write(tp,
4041 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4042 MII_TG3_AUXCTL_PCTL_WOL_EN |
4043 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4044 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4045 udelay(40);
4046 }
4047
4048 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4049 mac_mode = MAC_MODE_PORT_MODE_GMII;
4050 else if (tp->phy_flags &
4051 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4052 if (tp->link_config.active_speed == SPEED_1000)
4053 mac_mode = MAC_MODE_PORT_MODE_GMII;
4054 else
4055 mac_mode = MAC_MODE_PORT_MODE_MII;
4056 } else
4057 mac_mode = MAC_MODE_PORT_MODE_MII;
4058
4059 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4060 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4061 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4062 SPEED_100 : SPEED_10;
4063 if (tg3_5700_link_polarity(tp, speed))
4064 mac_mode |= MAC_MODE_LINK_POLARITY;
4065 else
4066 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4067 }
4068 } else {
4069 mac_mode = MAC_MODE_PORT_MODE_TBI;
4070 }
4071
4072 if (!tg3_flag(tp, 5750_PLUS))
4073 tw32(MAC_LED_CTRL, tp->led_ctrl);
4074
4075 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4076 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4077 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4078 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4079
4080 if (tg3_flag(tp, ENABLE_APE))
4081 mac_mode |= MAC_MODE_APE_TX_EN |
4082 MAC_MODE_APE_RX_EN |
4083 MAC_MODE_TDE_ENABLE;
4084
4085 tw32_f(MAC_MODE, mac_mode);
4086 udelay(100);
4087
4088 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4089 udelay(10);
4090 }
4091
4092 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4093 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4094 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4095 u32 base_val;
4096
4097 base_val = tp->pci_clock_ctrl;
4098 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4099 CLOCK_CTRL_TXCLK_DISABLE);
4100
4101 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4102 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4103 } else if (tg3_flag(tp, 5780_CLASS) ||
4104 tg3_flag(tp, CPMU_PRESENT) ||
4105 tg3_asic_rev(tp) == ASIC_REV_5906) {
4106 /* do nothing */
4107 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4108 u32 newbits1, newbits2;
4109
4110 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4111 tg3_asic_rev(tp) == ASIC_REV_5701) {
4112 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4113 CLOCK_CTRL_TXCLK_DISABLE |
4114 CLOCK_CTRL_ALTCLK);
4115 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4116 } else if (tg3_flag(tp, 5705_PLUS)) {
4117 newbits1 = CLOCK_CTRL_625_CORE;
4118 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4119 } else {
4120 newbits1 = CLOCK_CTRL_ALTCLK;
4121 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4122 }
4123
4124 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4125 40);
4126
4127 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4128 40);
4129
4130 if (!tg3_flag(tp, 5705_PLUS)) {
4131 u32 newbits3;
4132
4133 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4134 tg3_asic_rev(tp) == ASIC_REV_5701) {
4135 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4136 CLOCK_CTRL_TXCLK_DISABLE |
4137 CLOCK_CTRL_44MHZ_CORE);
4138 } else {
4139 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4140 }
4141
4142 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4143 tp->pci_clock_ctrl | newbits3, 40);
4144 }
4145 }
4146
4147 if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
4148 tg3_power_down_phy(tp, do_low_power);
4149
4150 tg3_frob_aux_power(tp, true);
4151
4152 /* Workaround for unstable PLL clock */
4153 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4154 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4155 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4156 u32 val = tr32(0x7d00);
4157
4158 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4159 tw32(0x7d00, val);
4160 if (!tg3_flag(tp, ENABLE_ASF)) {
4161 int err;
4162
4163 err = tg3_nvram_lock(tp);
4164 tg3_halt_cpu(tp, RX_CPU_BASE);
4165 if (!err)
4166 tg3_nvram_unlock(tp);
4167 }
4168 }
4169
4170 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4171
4172 return 0;
4173 }
4174
4175 static void tg3_power_down(struct tg3 *tp)
4176 {
4177 tg3_power_down_prepare(tp);
4178
4179 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4180 pci_set_power_state(tp->pdev, PCI_D3hot);
4181 }
4182
4183 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4184 {
4185 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4186 case MII_TG3_AUX_STAT_10HALF:
4187 *speed = SPEED_10;
4188 *duplex = DUPLEX_HALF;
4189 break;
4190
4191 case MII_TG3_AUX_STAT_10FULL:
4192 *speed = SPEED_10;
4193 *duplex = DUPLEX_FULL;
4194 break;
4195
4196 case MII_TG3_AUX_STAT_100HALF:
4197 *speed = SPEED_100;
4198 *duplex = DUPLEX_HALF;
4199 break;
4200
4201 case MII_TG3_AUX_STAT_100FULL:
4202 *speed = SPEED_100;
4203 *duplex = DUPLEX_FULL;
4204 break;
4205
4206 case MII_TG3_AUX_STAT_1000HALF:
4207 *speed = SPEED_1000;
4208 *duplex = DUPLEX_HALF;
4209 break;
4210
4211 case MII_TG3_AUX_STAT_1000FULL:
4212 *speed = SPEED_1000;
4213 *duplex = DUPLEX_FULL;
4214 break;
4215
4216 default:
4217 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4218 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4219 SPEED_10;
4220 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4221 DUPLEX_HALF;
4222 break;
4223 }
4224 *speed = SPEED_UNKNOWN;
4225 *duplex = DUPLEX_UNKNOWN;
4226 break;
4227 }
4228 }
4229
4230 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4231 {
4232 int err = 0;
4233 u32 val, new_adv;
4234
4235 new_adv = ADVERTISE_CSMA;
4236 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4237 new_adv |= mii_advertise_flowctrl(flowctrl);
4238
4239 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4240 if (err)
4241 goto done;
4242
4243 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4244 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4245
4246 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4247 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4248 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4249
4250 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4251 if (err)
4252 goto done;
4253 }
4254
4255 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4256 goto done;
4257
4258 tw32(TG3_CPMU_EEE_MODE,
4259 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4260
4261 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4262 if (!err) {
4263 u32 err2;
4264
4265 val = 0;
4266 /* Advertise 100-BaseTX EEE ability */
4267 if (advertise & ADVERTISED_100baseT_Full)
4268 val |= MDIO_AN_EEE_ADV_100TX;
4269 /* Advertise 1000-BaseT EEE ability */
4270 if (advertise & ADVERTISED_1000baseT_Full)
4271 val |= MDIO_AN_EEE_ADV_1000T;
4272 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4273 if (err)
4274 val = 0;
4275
4276 switch (tg3_asic_rev(tp)) {
4277 case ASIC_REV_5717:
4278 case ASIC_REV_57765:
4279 case ASIC_REV_57766:
4280 case ASIC_REV_5719:
4281 /* If we advertised any EEE abilities above... */
4282 if (val)
4283 val = MII_TG3_DSP_TAP26_ALNOKO |
4284 MII_TG3_DSP_TAP26_RMRXSTO |
4285 MII_TG3_DSP_TAP26_OPCSINPT;
4286 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4287 /* Fall through */
4288 case ASIC_REV_5720:
4289 case ASIC_REV_5762:
4290 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4291 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4292 MII_TG3_DSP_CH34TP2_HIBW01);
4293 }
4294
4295 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4296 if (!err)
4297 err = err2;
4298 }
4299
4300 done:
4301 return err;
4302 }
4303
4304 static void tg3_phy_copper_begin(struct tg3 *tp)
4305 {
4306 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4307 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4308 u32 adv, fc;
4309
4310 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4311 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4312 adv = ADVERTISED_10baseT_Half |
4313 ADVERTISED_10baseT_Full;
4314 if (tg3_flag(tp, WOL_SPEED_100MB))
4315 adv |= ADVERTISED_100baseT_Half |
4316 ADVERTISED_100baseT_Full;
4317 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
4318 adv |= ADVERTISED_1000baseT_Half |
4319 ADVERTISED_1000baseT_Full;
4320
4321 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4322 } else {
4323 adv = tp->link_config.advertising;
4324 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4325 adv &= ~(ADVERTISED_1000baseT_Half |
4326 ADVERTISED_1000baseT_Full);
4327
4328 fc = tp->link_config.flowctrl;
4329 }
4330
4331 tg3_phy_autoneg_cfg(tp, adv, fc);
4332
4333 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4334 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4335 /* Normally during power down we want to autonegotiate
4336 * the lowest possible speed for WOL. However, to avoid
4337 * link flap, we leave it untouched.
4338 */
4339 return;
4340 }
4341
4342 tg3_writephy(tp, MII_BMCR,
4343 BMCR_ANENABLE | BMCR_ANRESTART);
4344 } else {
4345 int i;
4346 u32 bmcr, orig_bmcr;
4347
4348 tp->link_config.active_speed = tp->link_config.speed;
4349 tp->link_config.active_duplex = tp->link_config.duplex;
4350
4351 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4352 /* With autoneg disabled, 5715 only links up when the
4353 * advertisement register has the configured speed
4354 * enabled.
4355 */
4356 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4357 }
4358
4359 bmcr = 0;
4360 switch (tp->link_config.speed) {
4361 default:
4362 case SPEED_10:
4363 break;
4364
4365 case SPEED_100:
4366 bmcr |= BMCR_SPEED100;
4367 break;
4368
4369 case SPEED_1000:
4370 bmcr |= BMCR_SPEED1000;
4371 break;
4372 }
4373
4374 if (tp->link_config.duplex == DUPLEX_FULL)
4375 bmcr |= BMCR_FULLDPLX;
4376
4377 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4378 (bmcr != orig_bmcr)) {
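/* Force the link down via loopback and wait up to ~15 ms
 * (1500 * 10 us) for BMSR to report link down before
 * writing the new BMCR value.
 */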
4379 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4380 for (i = 0; i < 1500; i++) {
4381 u32 tmp;
4382
4383 udelay(10);
4384 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4385 tg3_readphy(tp, MII_BMSR, &tmp))
4386 continue;
4387 if (!(tmp & BMSR_LSTATUS)) {
4388 udelay(40);
4389 break;
4390 }
4391 }
4392 tg3_writephy(tp, MII_BMCR, bmcr);
4393 udelay(40);
4394 }
4395 }
4396 }
4397
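/* Read the PHY's current autoneg, speed, duplex, flow control and
 * advertisement settings back into tp->link_config.
 */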
4398 static int tg3_phy_pull_config(struct tg3 *tp)
4399 {
4400 int err;
4401 u32 val;
4402
4403 err = tg3_readphy(tp, MII_BMCR, &val);
4404 if (err)
4405 goto done;
4406
4407 if (!(val & BMCR_ANENABLE)) {
4408 tp->link_config.autoneg = AUTONEG_DISABLE;
4409 tp->link_config.advertising = 0;
4410 tg3_flag_clear(tp, PAUSE_AUTONEG);
4411
4412 err = -EIO;
4413
4414 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4415 case 0:
4416 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4417 goto done;
4418
4419 tp->link_config.speed = SPEED_10;
4420 break;
4421 case BMCR_SPEED100:
4422 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4423 goto done;
4424
4425 tp->link_config.speed = SPEED_100;
4426 break;
4427 case BMCR_SPEED1000:
4428 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4429 tp->link_config.speed = SPEED_1000;
4430 break;
4431 }
4432 /* Fall through */
4433 default:
4434 goto done;
4435 }
4436
4437 if (val & BMCR_FULLDPLX)
4438 tp->link_config.duplex = DUPLEX_FULL;
4439 else
4440 tp->link_config.duplex = DUPLEX_HALF;
4441
4442 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4443
4444 err = 0;
4445 goto done;
4446 }
4447
4448 tp->link_config.autoneg = AUTONEG_ENABLE;
4449 tp->link_config.advertising = ADVERTISED_Autoneg;
4450 tg3_flag_set(tp, PAUSE_AUTONEG);
4451
4452 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4453 u32 adv;
4454
4455 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4456 if (err)
4457 goto done;
4458
4459 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4460 tp->link_config.advertising |= adv | ADVERTISED_TP;
4461
4462 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4463 } else {
4464 tp->link_config.advertising |= ADVERTISED_FIBRE;
4465 }
4466
4467 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4468 u32 adv;
4469
4470 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4471 err = tg3_readphy(tp, MII_CTRL1000, &val);
4472 if (err)
4473 goto done;
4474
4475 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4476 } else {
4477 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4478 if (err)
4479 goto done;
4480
4481 adv = tg3_decode_flowctrl_1000X(val);
4482 tp->link_config.flowctrl = adv;
4483
4484 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4485 adv = mii_adv_to_ethtool_adv_x(val);
4486 }
4487
4488 tp->link_config.advertising |= adv;
4489 }
4490
4491 done:
4492 return err;
4493 }
4494
4495 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4496 {
4497 int err;
4498
4499 /* Turn off tap power management and set the
4500  * extended packet length bit. */
4501 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4502
4503 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4504 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4505 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4506 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4507 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4508
4509 udelay(40);
4510
4511 return err;
4512 }
4513
4514 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4515 {
4516 u32 val;
4517 u32 tgtadv = 0;
4518 u32 advertising = tp->link_config.advertising;
4519
4520 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4521 return true;
4522
4523 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
4524 return false;
4525
4526 val &= (MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T);
4527
4529 if (advertising & ADVERTISED_100baseT_Full)
4530 tgtadv |= MDIO_AN_EEE_ADV_100TX;
4531 if (advertising & ADVERTISED_1000baseT_Full)
4532 tgtadv |= MDIO_AN_EEE_ADV_1000T;
4533
4534 if (val != tgtadv)
4535 return false;
4536
4537 return true;
4538 }
4539
4540 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4541 {
4542 u32 advmsk, tgtadv, advertising;
4543
4544 advertising = tp->link_config.advertising;
4545 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4546
4547 advmsk = ADVERTISE_ALL;
4548 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4549 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4550 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4551 }
4552
4553 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4554 return false;
4555
4556 if ((*lcladv & advmsk) != tgtadv)
4557 return false;
4558
4559 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4560 u32 tg3_ctrl;
4561
4562 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4563
4564 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4565 return false;
4566
4567 if (tgtadv &&
4568 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4569 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4570 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4571 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4572 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4573 } else {
4574 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4575 }
4576
4577 if (tg3_ctrl != tgtadv)
4578 return false;
4579 }
4580
4581 return true;
4582 }
4583
4584 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4585 {
4586 u32 lpeth = 0;
4587
4588 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4589 u32 val;
4590
4591 if (tg3_readphy(tp, MII_STAT1000, &val))
4592 return false;
4593
4594 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4595 }
4596
4597 if (tg3_readphy(tp, MII_LPA, rmtadv))
4598 return false;
4599
4600 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4601 tp->link_config.rmt_adv = lpeth;
4602
4603 return true;
4604 }
4605
4606 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4607 {
4608 if (curr_link_up != tp->link_up) {
4609 if (curr_link_up) {
4610 netif_carrier_on(tp->dev);
4611 } else {
4612 netif_carrier_off(tp->dev);
4613 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4614 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4615 }
4616
4617 tg3_link_report(tp);
4618 return true;
4619 }
4620
4621 return false;
4622 }
4623
4624 static void tg3_clear_mac_status(struct tg3 *tp)
4625 {
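/* Mask MAC events and ack (clear) the latched sync/config/MI/
 * link-state change bits by writing them back.
 */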
4626 tw32(MAC_EVENT, 0);
4627
4628 tw32_f(MAC_STATUS,
4629 MAC_STATUS_SYNC_CHANGED |
4630 MAC_STATUS_CFG_CHANGED |
4631 MAC_STATUS_MI_COMPLETION |
4632 MAC_STATUS_LNKSTATE_CHANGED);
4633 udelay(40);
4634 }
4635
4636 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4637 {
4638 bool current_link_up;
4639 u32 bmsr, val;
4640 u32 lcl_adv, rmt_adv;
4641 u16 current_speed;
4642 u8 current_duplex;
4643 int i, err;
4644
4645 tg3_clear_mac_status(tp);
4646
4647 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4648 tw32_f(MAC_MI_MODE,
4649 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4650 udelay(80);
4651 }
4652
4653 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4654
4655 /* Some third-party PHYs need to be reset on link going
4656 * down.
4657 */
4658 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4659 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4660 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4661 tp->link_up) {
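/* The BMSR link bit is latched-low, so read it twice; the
 * second read returns the current link state.
 */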
4662 tg3_readphy(tp, MII_BMSR, &bmsr);
4663 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4664 !(bmsr & BMSR_LSTATUS))
4665 force_reset = true;
4666 }
4667 if (force_reset)
4668 tg3_phy_reset(tp);
4669
4670 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4671 tg3_readphy(tp, MII_BMSR, &bmsr);
4672 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4673 !tg3_flag(tp, INIT_COMPLETE))
4674 bmsr = 0;
4675
4676 if (!(bmsr & BMSR_LSTATUS)) {
4677 err = tg3_init_5401phy_dsp(tp);
4678 if (err)
4679 return err;
4680
4681 tg3_readphy(tp, MII_BMSR, &bmsr);
4682 for (i = 0; i < 1000; i++) {
4683 udelay(10);
4684 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4685 (bmsr & BMSR_LSTATUS)) {
4686 udelay(40);
4687 break;
4688 }
4689 }
4690
4691 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4692 TG3_PHY_REV_BCM5401_B0 &&
4693 !(bmsr & BMSR_LSTATUS) &&
4694 tp->link_config.active_speed == SPEED_1000) {
4695 err = tg3_phy_reset(tp);
4696 if (!err)
4697 err = tg3_init_5401phy_dsp(tp);
4698 if (err)
4699 return err;
4700 }
4701 }
4702 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4703 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4704 /* 5701 {A0,B0} CRC bug workaround */
4705 tg3_writephy(tp, 0x15, 0x0a75);
4706 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4707 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4708 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4709 }
4710
4711 /* Clear pending interrupts... */
4712 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4713 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4714
4715 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4716 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4717 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4718 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4719
4720 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4721 tg3_asic_rev(tp) == ASIC_REV_5701) {
4722 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4723 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4724 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4725 else
4726 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4727 }
4728
4729 current_link_up = false;
4730 current_speed = SPEED_UNKNOWN;
4731 current_duplex = DUPLEX_UNKNOWN;
4732 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4733 tp->link_config.rmt_adv = 0;
4734
4735 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4736 err = tg3_phy_auxctl_read(tp,
4737 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4738 &val);
4739 if (!err && !(val & (1 << 10))) {
4740 tg3_phy_auxctl_write(tp,
4741 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4742 val | (1 << 10));
4743 goto relink;
4744 }
4745 }
4746
4747 bmsr = 0;
4748 for (i = 0; i < 100; i++) {
4749 tg3_readphy(tp, MII_BMSR, &bmsr);
4750 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4751 (bmsr & BMSR_LSTATUS))
4752 break;
4753 udelay(40);
4754 }
4755
4756 if (bmsr & BMSR_LSTATUS) {
4757 u32 aux_stat, bmcr;
4758
4759 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4760 for (i = 0; i < 2000; i++) {
4761 udelay(10);
4762 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4763 aux_stat)
4764 break;
4765 }
4766
4767 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4768 &current_speed,
4769 &current_duplex);
4770
4771 bmcr = 0;
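/* Retry for up to ~2 ms until BMCR reads back a plausible value
 * (neither 0 nor 0x7fff, which indicate the PHY has not yet
 * returned valid data).
 */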
4772 for (i = 0; i < 200; i++) {
4773 tg3_readphy(tp, MII_BMCR, &bmcr);
4774 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4775 continue;
4776 if (bmcr && bmcr != 0x7fff)
4777 break;
4778 udelay(10);
4779 }
4780
4781 lcl_adv = 0;
4782 rmt_adv = 0;
4783
4784 tp->link_config.active_speed = current_speed;
4785 tp->link_config.active_duplex = current_duplex;
4786
4787 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4788 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4789
4790 if ((bmcr & BMCR_ANENABLE) &&
4791 eee_config_ok &&
4792 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4793 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4794 current_link_up = true;
4795
4796 /* EEE setting changes take effect only after a PHY
4797 * reset. If we have skipped a reset due to Link Flap
4798 * Avoidance being enabled, do it now.
4799 */
4800 if (!eee_config_ok &&
4801 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4802 !force_reset)
4803 tg3_phy_reset(tp);
4804 } else {
4805 if (!(bmcr & BMCR_ANENABLE) &&
4806 tp->link_config.speed == current_speed &&
4807 tp->link_config.duplex == current_duplex) {
4808 current_link_up = true;
4809 }
4810 }
4811
4812 if (current_link_up &&
4813 tp->link_config.active_duplex == DUPLEX_FULL) {
4814 u32 reg, bit;
4815
4816 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4817 reg = MII_TG3_FET_GEN_STAT;
4818 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4819 } else {
4820 reg = MII_TG3_EXT_STAT;
4821 bit = MII_TG3_EXT_STAT_MDIX;
4822 }
4823
4824 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4825 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4826
4827 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4828 }
4829 }
4830
4831 relink:
4832 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4833 tg3_phy_copper_begin(tp);
4834
4835 if (tg3_flag(tp, ROBOSWITCH)) {
4836 current_link_up = true;
4837 /* FIXME: when a BCM5325 switch is used, use 100 Mbit/s */
4838 current_speed = SPEED_1000;
4839 current_duplex = DUPLEX_FULL;
4840 tp->link_config.active_speed = current_speed;
4841 tp->link_config.active_duplex = current_duplex;
4842 }
4843
4844 tg3_readphy(tp, MII_BMSR, &bmsr);
4845 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4846 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4847 current_link_up = true;
4848 }
4849
4850 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4851 if (current_link_up) {
4852 if (tp->link_config.active_speed == SPEED_100 ||
4853 tp->link_config.active_speed == SPEED_10)
4854 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4855 else
4856 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4857 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4858 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4859 else
4860 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4861
4862 /* In order for the 5750 core in BCM4785 chip to work properly
4863 * in RGMII mode, the Led Control Register must be set up.
4864 */
4865 if (tg3_flag(tp, RGMII_MODE)) {
4866 u32 led_ctrl = tr32(MAC_LED_CTRL);
4867 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4868
4869 if (tp->link_config.active_speed == SPEED_10)
4870 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4871 else if (tp->link_config.active_speed == SPEED_100)
4872 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4873 LED_CTRL_100MBPS_ON);
4874 else if (tp->link_config.active_speed == SPEED_1000)
4875 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4876 LED_CTRL_1000MBPS_ON);
4877
4878 tw32(MAC_LED_CTRL, led_ctrl);
4879 udelay(40);
4880 }
4881
4882 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4883 if (tp->link_config.active_duplex == DUPLEX_HALF)
4884 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4885
4886 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4887 if (current_link_up &&
4888 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4889 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4890 else
4891 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4892 }
4893
4894 /* ??? Without this setting Netgear GA302T PHY does not
4895 * ??? send/receive packets...
4896 */
4897 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4898 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
4899 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4900 tw32_f(MAC_MI_MODE, tp->mi_mode);
4901 udelay(80);
4902 }
4903
4904 tw32_f(MAC_MODE, tp->mac_mode);
4905 udelay(40);
4906
4907 tg3_phy_eee_adjust(tp, current_link_up);
4908
4909 if (tg3_flag(tp, USE_LINKCHG_REG)) {
4910 /* Polled via timer. */
4911 tw32_f(MAC_EVENT, 0);
4912 } else {
4913 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4914 }
4915 udelay(40);
4916
4917 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
4918 current_link_up &&
4919 tp->link_config.active_speed == SPEED_1000 &&
4920 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4921 udelay(120);
4922 tw32_f(MAC_STATUS,
4923 (MAC_STATUS_SYNC_CHANGED |
4924 MAC_STATUS_CFG_CHANGED));
4925 udelay(40);
4926 tg3_write_mem(tp,
4927 NIC_SRAM_FIRMWARE_MBOX,
4928 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4929 }
4930
4931 /* Prevent send BD corruption. */
4932 if (tg3_flag(tp, CLKREQ_BUG)) {
4933 if (tp->link_config.active_speed == SPEED_100 ||
4934 tp->link_config.active_speed == SPEED_10)
4935 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4936 PCI_EXP_LNKCTL_CLKREQ_EN);
4937 else
4938 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4939 PCI_EXP_LNKCTL_CLKREQ_EN);
4940 }
4941
4942 tg3_test_and_report_link_chg(tp, current_link_up);
4943
4944 return 0;
4945 }
4946
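/* Software state for the IEEE 802.3 clause 37 (1000BASE-X)
 * auto-negotiation arbitration state machine; the MR_* flags are
 * modelled on the spec's mr_* management variables.
 */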
4947 struct tg3_fiber_aneginfo {
4948 int state;
4949 #define ANEG_STATE_UNKNOWN 0
4950 #define ANEG_STATE_AN_ENABLE 1
4951 #define ANEG_STATE_RESTART_INIT 2
4952 #define ANEG_STATE_RESTART 3
4953 #define ANEG_STATE_DISABLE_LINK_OK 4
4954 #define ANEG_STATE_ABILITY_DETECT_INIT 5
4955 #define ANEG_STATE_ABILITY_DETECT 6
4956 #define ANEG_STATE_ACK_DETECT_INIT 7
4957 #define ANEG_STATE_ACK_DETECT 8
4958 #define ANEG_STATE_COMPLETE_ACK_INIT 9
4959 #define ANEG_STATE_COMPLETE_ACK 10
4960 #define ANEG_STATE_IDLE_DETECT_INIT 11
4961 #define ANEG_STATE_IDLE_DETECT 12
4962 #define ANEG_STATE_LINK_OK 13
4963 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
4964 #define ANEG_STATE_NEXT_PAGE_WAIT 15
4965
4966 u32 flags;
4967 #define MR_AN_ENABLE 0x00000001
4968 #define MR_RESTART_AN 0x00000002
4969 #define MR_AN_COMPLETE 0x00000004
4970 #define MR_PAGE_RX 0x00000008
4971 #define MR_NP_LOADED 0x00000010
4972 #define MR_TOGGLE_TX 0x00000020
4973 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
4974 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
4975 #define MR_LP_ADV_SYM_PAUSE 0x00000100
4976 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
4977 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4978 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4979 #define MR_LP_ADV_NEXT_PAGE 0x00001000
4980 #define MR_TOGGLE_RX 0x00002000
4981 #define MR_NP_RX 0x00004000
4982
4983 #define MR_LINK_OK 0x80000000
4984
4985 unsigned long link_time, cur_time;
4986
4987 u32 ability_match_cfg;
4988 int ability_match_count;
4989
4990 char ability_match, idle_match, ack_match;
4991
4992 u32 txconfig, rxconfig;
4993 #define ANEG_CFG_NP 0x00000080
4994 #define ANEG_CFG_ACK 0x00000040
4995 #define ANEG_CFG_RF2 0x00000020
4996 #define ANEG_CFG_RF1 0x00000010
4997 #define ANEG_CFG_PS2 0x00000001
4998 #define ANEG_CFG_PS1 0x00008000
4999 #define ANEG_CFG_HD 0x00004000
5000 #define ANEG_CFG_FD 0x00002000
5001 #define ANEG_CFG_INVAL 0x00001f06
5002
5003 };
5004 #define ANEG_OK 0
5005 #define ANEG_DONE 1
5006 #define ANEG_TIMER_ENAB 2
5007 #define ANEG_FAILED -1
5008
5009 #define ANEG_STATE_SETTLE_TIME 10000
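/* The settle time is measured in state-machine ticks; fiber_autoneg()
 * below advances one tick per microsecond, so this is ~10 ms.
 */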
5010
5011 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5012 struct tg3_fiber_aneginfo *ap)
5013 {
5014 u16 flowctrl;
5015 unsigned long delta;
5016 u32 rx_cfg_reg;
5017 int ret;
5018
5019 if (ap->state == ANEG_STATE_UNKNOWN) {
5020 ap->rxconfig = 0;
5021 ap->link_time = 0;
5022 ap->cur_time = 0;
5023 ap->ability_match_cfg = 0;
5024 ap->ability_match_count = 0;
5025 ap->ability_match = 0;
5026 ap->idle_match = 0;
5027 ap->ack_match = 0;
5028 }
5029 ap->cur_time++;
5030
5031 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5032 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5033
5034 if (rx_cfg_reg != ap->ability_match_cfg) {
5035 ap->ability_match_cfg = rx_cfg_reg;
5036 ap->ability_match = 0;
5037 ap->ability_match_count = 0;
5038 } else {
5039 if (++ap->ability_match_count > 1) {
5040 ap->ability_match = 1;
5041 ap->ability_match_cfg = rx_cfg_reg;
5042 }
5043 }
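/* ability_match is asserted only once the same config word has
 * been seen on three consecutive samples.
 */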
5044 if (rx_cfg_reg & ANEG_CFG_ACK)
5045 ap->ack_match = 1;
5046 else
5047 ap->ack_match = 0;
5048
5049 ap->idle_match = 0;
5050 } else {
5051 ap->idle_match = 1;
5052 ap->ability_match_cfg = 0;
5053 ap->ability_match_count = 0;
5054 ap->ability_match = 0;
5055 ap->ack_match = 0;
5056
5057 rx_cfg_reg = 0;
5058 }
5059
5060 ap->rxconfig = rx_cfg_reg;
5061 ret = ANEG_OK;
5062
5063 switch (ap->state) {
5064 case ANEG_STATE_UNKNOWN:
5065 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5066 ap->state = ANEG_STATE_AN_ENABLE;
5067
5068 /* fallthru */
5069 case ANEG_STATE_AN_ENABLE:
5070 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5071 if (ap->flags & MR_AN_ENABLE) {
5072 ap->link_time = 0;
5073 ap->cur_time = 0;
5074 ap->ability_match_cfg = 0;
5075 ap->ability_match_count = 0;
5076 ap->ability_match = 0;
5077 ap->idle_match = 0;
5078 ap->ack_match = 0;
5079
5080 ap->state = ANEG_STATE_RESTART_INIT;
5081 } else {
5082 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5083 }
5084 break;
5085
5086 case ANEG_STATE_RESTART_INIT:
5087 ap->link_time = ap->cur_time;
5088 ap->flags &= ~(MR_NP_LOADED);
5089 ap->txconfig = 0;
5090 tw32(MAC_TX_AUTO_NEG, 0);
5091 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5092 tw32_f(MAC_MODE, tp->mac_mode);
5093 udelay(40);
5094
5095 ret = ANEG_TIMER_ENAB;
5096 ap->state = ANEG_STATE_RESTART;
5097
5098 /* fallthru */
5099 case ANEG_STATE_RESTART:
5100 delta = ap->cur_time - ap->link_time;
5101 if (delta > ANEG_STATE_SETTLE_TIME)
5102 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5103 else
5104 ret = ANEG_TIMER_ENAB;
5105 break;
5106
5107 case ANEG_STATE_DISABLE_LINK_OK:
5108 ret = ANEG_DONE;
5109 break;
5110
5111 case ANEG_STATE_ABILITY_DETECT_INIT:
5112 ap->flags &= ~(MR_TOGGLE_TX);
5113 ap->txconfig = ANEG_CFG_FD;
5114 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5115 if (flowctrl & ADVERTISE_1000XPAUSE)
5116 ap->txconfig |= ANEG_CFG_PS1;
5117 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5118 ap->txconfig |= ANEG_CFG_PS2;
5119 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5120 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5121 tw32_f(MAC_MODE, tp->mac_mode);
5122 udelay(40);
5123
5124 ap->state = ANEG_STATE_ABILITY_DETECT;
5125 break;
5126
5127 case ANEG_STATE_ABILITY_DETECT:
5128 if (ap->ability_match != 0 && ap->rxconfig != 0)
5129 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5130 break;
5131
5132 case ANEG_STATE_ACK_DETECT_INIT:
5133 ap->txconfig |= ANEG_CFG_ACK;
5134 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5135 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5136 tw32_f(MAC_MODE, tp->mac_mode);
5137 udelay(40);
5138
5139 ap->state = ANEG_STATE_ACK_DETECT;
5140
5141 /* fallthru */
5142 case ANEG_STATE_ACK_DETECT:
5143 if (ap->ack_match != 0) {
5144 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5145 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5146 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5147 } else {
5148 ap->state = ANEG_STATE_AN_ENABLE;
5149 }
5150 } else if (ap->ability_match != 0 &&
5151 ap->rxconfig == 0) {
5152 ap->state = ANEG_STATE_AN_ENABLE;
5153 }
5154 break;
5155
5156 case ANEG_STATE_COMPLETE_ACK_INIT:
5157 if (ap->rxconfig & ANEG_CFG_INVAL) {
5158 ret = ANEG_FAILED;
5159 break;
5160 }
5161 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5162 MR_LP_ADV_HALF_DUPLEX |
5163 MR_LP_ADV_SYM_PAUSE |
5164 MR_LP_ADV_ASYM_PAUSE |
5165 MR_LP_ADV_REMOTE_FAULT1 |
5166 MR_LP_ADV_REMOTE_FAULT2 |
5167 MR_LP_ADV_NEXT_PAGE |
5168 MR_TOGGLE_RX |
5169 MR_NP_RX);
5170 if (ap->rxconfig & ANEG_CFG_FD)
5171 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5172 if (ap->rxconfig & ANEG_CFG_HD)
5173 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5174 if (ap->rxconfig & ANEG_CFG_PS1)
5175 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5176 if (ap->rxconfig & ANEG_CFG_PS2)
5177 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5178 if (ap->rxconfig & ANEG_CFG_RF1)
5179 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5180 if (ap->rxconfig & ANEG_CFG_RF2)
5181 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5182 if (ap->rxconfig & ANEG_CFG_NP)
5183 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5184
5185 ap->link_time = ap->cur_time;
5186
5187 ap->flags ^= (MR_TOGGLE_TX);
5188 if (ap->rxconfig & 0x0008)
5189 ap->flags |= MR_TOGGLE_RX;
5190 if (ap->rxconfig & ANEG_CFG_NP)
5191 ap->flags |= MR_NP_RX;
5192 ap->flags |= MR_PAGE_RX;
5193
5194 ap->state = ANEG_STATE_COMPLETE_ACK;
5195 ret = ANEG_TIMER_ENAB;
5196 break;
5197
5198 case ANEG_STATE_COMPLETE_ACK:
5199 if (ap->ability_match != 0 &&
5200 ap->rxconfig == 0) {
5201 ap->state = ANEG_STATE_AN_ENABLE;
5202 break;
5203 }
5204 delta = ap->cur_time - ap->link_time;
5205 if (delta > ANEG_STATE_SETTLE_TIME) {
5206 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5207 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5208 } else {
5209 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5210 !(ap->flags & MR_NP_RX)) {
5211 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5212 } else {
5213 ret = ANEG_FAILED;
5214 }
5215 }
5216 }
5217 break;
5218
5219 case ANEG_STATE_IDLE_DETECT_INIT:
5220 ap->link_time = ap->cur_time;
5221 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5222 tw32_f(MAC_MODE, tp->mac_mode);
5223 udelay(40);
5224
5225 ap->state = ANEG_STATE_IDLE_DETECT;
5226 ret = ANEG_TIMER_ENAB;
5227 break;
5228
5229 case ANEG_STATE_IDLE_DETECT:
5230 if (ap->ability_match != 0 &&
5231 ap->rxconfig == 0) {
5232 ap->state = ANEG_STATE_AN_ENABLE;
5233 break;
5234 }
5235 delta = ap->cur_time - ap->link_time;
5236 if (delta > ANEG_STATE_SETTLE_TIME) {
5237 /* XXX another gem from the Broadcom driver :( */
5238 ap->state = ANEG_STATE_LINK_OK;
5239 }
5240 break;
5241
5242 case ANEG_STATE_LINK_OK:
5243 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5244 ret = ANEG_DONE;
5245 break;
5246
5247 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5248 /* ??? unimplemented */
5249 break;
5250
5251 case ANEG_STATE_NEXT_PAGE_WAIT:
5252 /* ??? unimplemented */
5253 break;
5254
5255 default:
5256 ret = ANEG_FAILED;
5257 break;
5258 }
5259
5260 return ret;
5261 }
5262
5263 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5264 {
5265 int res = 0;
5266 struct tg3_fiber_aneginfo aninfo;
5267 int status = ANEG_FAILED;
5268 unsigned int tick;
5269 u32 tmp;
5270
5271 tw32_f(MAC_TX_AUTO_NEG, 0);
5272
5273 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5274 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5275 udelay(40);
5276
5277 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5278 udelay(40);
5279
5280 memset(&aninfo, 0, sizeof(aninfo));
5281 aninfo.flags |= MR_AN_ENABLE;
5282 aninfo.state = ANEG_STATE_UNKNOWN;
5283 aninfo.cur_time = 0;
5284 tick = 0;
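/* Run the autoneg state machine for at most ~195 ms
 * (195000 ticks of ~1 us each).
 */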
5285 while (++tick < 195000) {
5286 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5287 if (status == ANEG_DONE || status == ANEG_FAILED)
5288 break;
5289
5290 udelay(1);
5291 }
5292
5293 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5294 tw32_f(MAC_MODE, tp->mac_mode);
5295 udelay(40);
5296
5297 *txflags = aninfo.txconfig;
5298 *rxflags = aninfo.flags;
5299
5300 if (status == ANEG_DONE &&
5301 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5302 MR_LP_ADV_FULL_DUPLEX)))
5303 res = 1;
5304
5305 return res;
5306 }
5307
5308 static void tg3_init_bcm8002(struct tg3 *tp)
5309 {
5310 u32 mac_status = tr32(MAC_STATUS);
5311 int i;
5312
5313 /* Reset only on first-time init or when we have a link. */
5314 if (tg3_flag(tp, INIT_COMPLETE) &&
5315 !(mac_status & MAC_STATUS_PCS_SYNCED))
5316 return;
5317
5318 /* Set PLL lock range. */
5319 tg3_writephy(tp, 0x16, 0x8007);
5320
5321 /* SW reset */
5322 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5323
5324 /* Wait for reset to complete. */
5325 /* XXX schedule_timeout() ... */
5326 for (i = 0; i < 500; i++)
5327 udelay(10);
5328
5329 /* Config mode; select PMA/Ch 1 regs. */
5330 tg3_writephy(tp, 0x10, 0x8411);
5331
5332 /* Enable auto-lock and comdet, select txclk for tx. */
5333 tg3_writephy(tp, 0x11, 0x0a10);
5334
5335 tg3_writephy(tp, 0x18, 0x00a0);
5336 tg3_writephy(tp, 0x16, 0x41ff);
5337
5338 /* Assert and deassert POR. */
5339 tg3_writephy(tp, 0x13, 0x0400);
5340 udelay(40);
5341 tg3_writephy(tp, 0x13, 0x0000);
5342
5343 tg3_writephy(tp, 0x11, 0x0a50);
5344 udelay(40);
5345 tg3_writephy(tp, 0x11, 0x0a10);
5346
5347 /* Wait for signal to stabilize */
5348 /* XXX schedule_timeout() ... */
5349 for (i = 0; i < 15000; i++)
5350 udelay(10);
5351
5352 /* Deselect the channel register so we can read the PHYID
5353 * later.
5354 */
5355 tg3_writephy(tp, 0x10, 0x8011);
5356 }
5357
5358 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5359 {
5360 u16 flowctrl;
5361 bool current_link_up;
5362 u32 sg_dig_ctrl, sg_dig_status;
5363 u32 serdes_cfg, expected_sg_dig_ctrl;
5364 int workaround, port_a;
5365
5366 serdes_cfg = 0;
5367 expected_sg_dig_ctrl = 0;
5368 workaround = 0;
5369 port_a = 1;
5370 current_link_up = false;
5371
5372 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5373 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5374 workaround = 1;
5375 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5376 port_a = 0;
5377
5378 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5379 /* preserve bits 20-23 for voltage regulator */
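/* (i.e. mask 0x00f06fff = bits 0-11 (0x00000fff),
 * bits 13-14 (0x00006000) and bits 20-23 (0x00f00000))
 */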
5380 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5381 }
5382
5383 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5384
5385 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5386 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5387 if (workaround) {
5388 u32 val = serdes_cfg;
5389
5390 if (port_a)
5391 val |= 0xc010000;
5392 else
5393 val |= 0x4010000;
5394 tw32_f(MAC_SERDES_CFG, val);
5395 }
5396
5397 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5398 }
5399 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5400 tg3_setup_flow_control(tp, 0, 0);
5401 current_link_up = true;
5402 }
5403 goto out;
5404 }
5405
5406 /* Want auto-negotiation. */
5407 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5408
5409 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5410 if (flowctrl & ADVERTISE_1000XPAUSE)
5411 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5412 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5413 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5414
5415 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5416 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5417 tp->serdes_counter &&
5418 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5419 MAC_STATUS_RCVD_CFG)) ==
5420 MAC_STATUS_PCS_SYNCED)) {
5421 tp->serdes_counter--;
5422 current_link_up = true;
5423 goto out;
5424 }
5425 restart_autoneg:
5426 if (workaround)
5427 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5428 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5429 udelay(5);
5430 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5431
5432 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5433 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5434 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5435 MAC_STATUS_SIGNAL_DET)) {
5436 sg_dig_status = tr32(SG_DIG_STATUS);
5437 mac_status = tr32(MAC_STATUS);
5438
5439 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5440 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5441 u32 local_adv = 0, remote_adv = 0;
5442
5443 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5444 local_adv |= ADVERTISE_1000XPAUSE;
5445 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5446 local_adv |= ADVERTISE_1000XPSE_ASYM;
5447
5448 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5449 remote_adv |= LPA_1000XPAUSE;
5450 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5451 remote_adv |= LPA_1000XPAUSE_ASYM;
5452
5453 tp->link_config.rmt_adv =
5454 mii_adv_to_ethtool_adv_x(remote_adv);
5455
5456 tg3_setup_flow_control(tp, local_adv, remote_adv);
5457 current_link_up = true;
5458 tp->serdes_counter = 0;
5459 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5460 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5461 if (tp->serdes_counter)
5462 tp->serdes_counter--;
5463 else {
5464 if (workaround) {
5465 u32 val = serdes_cfg;
5466
5467 if (port_a)
5468 val |= 0xc010000;
5469 else
5470 val |= 0x4010000;
5471
5472 tw32_f(MAC_SERDES_CFG, val);
5473 }
5474
5475 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5476 udelay(40);
5477
5478 /* Parallel detection: the link is up only
5479  * if we have PCS_SYNC and are not receiving
5480  * config code words. */
5481 mac_status = tr32(MAC_STATUS);
5482 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5483 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5484 tg3_setup_flow_control(tp, 0, 0);
5485 current_link_up = true;
5486 tp->phy_flags |=
5487 TG3_PHYFLG_PARALLEL_DETECT;
5488 tp->serdes_counter =
5489 SERDES_PARALLEL_DET_TIMEOUT;
5490 } else
5491 goto restart_autoneg;
5492 }
5493 }
5494 } else {
5495 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5496 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5497 }
5498
5499 out:
5500 return current_link_up;
5501 }
5502
5503 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5504 {
5505 bool current_link_up = false;
5506
5507 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5508 goto out;
5509
5510 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5511 u32 txflags, rxflags;
5512 int i;
5513
5514 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5515 u32 local_adv = 0, remote_adv = 0;
5516
5517 if (txflags & ANEG_CFG_PS1)
5518 local_adv |= ADVERTISE_1000XPAUSE;
5519 if (txflags & ANEG_CFG_PS2)
5520 local_adv |= ADVERTISE_1000XPSE_ASYM;
5521
5522 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5523 remote_adv |= LPA_1000XPAUSE;
5524 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5525 remote_adv |= LPA_1000XPAUSE_ASYM;
5526
5527 tp->link_config.rmt_adv =
5528 mii_adv_to_ethtool_adv_x(remote_adv);
5529
5530 tg3_setup_flow_control(tp, local_adv, remote_adv);
5531
5532 current_link_up = true;
5533 }
5534 for (i = 0; i < 30; i++) {
5535 udelay(20);
5536 tw32_f(MAC_STATUS,
5537 (MAC_STATUS_SYNC_CHANGED |
5538 MAC_STATUS_CFG_CHANGED));
5539 udelay(40);
5540 if ((tr32(MAC_STATUS) &
5541 (MAC_STATUS_SYNC_CHANGED |
5542 MAC_STATUS_CFG_CHANGED)) == 0)
5543 break;
5544 }
5545
5546 mac_status = tr32(MAC_STATUS);
5547 if (!current_link_up &&
5548 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5549 !(mac_status & MAC_STATUS_RCVD_CFG))
5550 current_link_up = true;
5551 } else {
5552 tg3_setup_flow_control(tp, 0, 0);
5553
5554 /* Forcing 1000FD link up. */
5555 current_link_up = true;
5556
5557 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5558 udelay(40);
5559
5560 tw32_f(MAC_MODE, tp->mac_mode);
5561 udelay(40);
5562 }
5563
5564 out:
5565 return current_link_up;
5566 }
5567
5568 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5569 {
5570 u32 orig_pause_cfg;
5571 u16 orig_active_speed;
5572 u8 orig_active_duplex;
5573 u32 mac_status;
5574 bool current_link_up;
5575 int i;
5576
5577 orig_pause_cfg = tp->link_config.active_flowctrl;
5578 orig_active_speed = tp->link_config.active_speed;
5579 orig_active_duplex = tp->link_config.active_duplex;
5580
5581 if (!tg3_flag(tp, HW_AUTONEG) &&
5582 tp->link_up &&
5583 tg3_flag(tp, INIT_COMPLETE)) {
5584 mac_status = tr32(MAC_STATUS);
5585 mac_status &= (MAC_STATUS_PCS_SYNCED |
5586 MAC_STATUS_SIGNAL_DET |
5587 MAC_STATUS_CFG_CHANGED |
5588 MAC_STATUS_RCVD_CFG);
5589 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5590 MAC_STATUS_SIGNAL_DET)) {
5591 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5592 MAC_STATUS_CFG_CHANGED));
5593 return 0;
5594 }
5595 }
5596
5597 tw32_f(MAC_TX_AUTO_NEG, 0);
5598
5599 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5600 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5601 tw32_f(MAC_MODE, tp->mac_mode);
5602 udelay(40);
5603
5604 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5605 tg3_init_bcm8002(tp);
5606
5607 /* Enable link change events even while polling the serdes. */
5608 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5609 udelay(40);
5610
5611 current_link_up = false;
5612 tp->link_config.rmt_adv = 0;
5613 mac_status = tr32(MAC_STATUS);
5614
5615 if (tg3_flag(tp, HW_AUTONEG))
5616 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5617 else
5618 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5619
5620 tp->napi[0].hw_status->status =
5621 (SD_STATUS_UPDATED |
5622 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5623
5624 for (i = 0; i < 100; i++) {
5625 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5626 MAC_STATUS_CFG_CHANGED));
5627 udelay(5);
5628 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5629 MAC_STATUS_CFG_CHANGED |
5630 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5631 break;
5632 }
5633
5634 mac_status = tr32(MAC_STATUS);
5635 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5636 current_link_up = false;
5637 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5638 tp->serdes_counter == 0) {
5639 tw32_f(MAC_MODE, (tp->mac_mode |
5640 MAC_MODE_SEND_CONFIGS));
5641 udelay(1);
5642 tw32_f(MAC_MODE, tp->mac_mode);
5643 }
5644 }
5645
5646 if (current_link_up) {
5647 tp->link_config.active_speed = SPEED_1000;
5648 tp->link_config.active_duplex = DUPLEX_FULL;
5649 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5650 LED_CTRL_LNKLED_OVERRIDE |
5651 LED_CTRL_1000MBPS_ON));
5652 } else {
5653 tp->link_config.active_speed = SPEED_UNKNOWN;
5654 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5655 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5656 LED_CTRL_LNKLED_OVERRIDE |
5657 LED_CTRL_TRAFFIC_OVERRIDE));
5658 }
5659
5660 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5661 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5662 if (orig_pause_cfg != now_pause_cfg ||
5663 orig_active_speed != tp->link_config.active_speed ||
5664 orig_active_duplex != tp->link_config.active_duplex)
5665 tg3_link_report(tp);
5666 }
5667
5668 return 0;
5669 }
5670
5671 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5672 {
5673 int err = 0;
5674 u32 bmsr, bmcr;
5675 u16 current_speed = SPEED_UNKNOWN;
5676 u8 current_duplex = DUPLEX_UNKNOWN;
5677 bool current_link_up = false;
5678 u32 local_adv = 0, remote_adv = 0, sgsr;
5679
5680 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5681 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5682 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5683 (sgsr & SERDES_TG3_SGMII_MODE)) {
5684
5685 if (force_reset)
5686 tg3_phy_reset(tp);
5687
5688 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5689
5690 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5691 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5692 } else {
5693 current_link_up = true;
5694 if (sgsr & SERDES_TG3_SPEED_1000) {
5695 current_speed = SPEED_1000;
5696 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5697 } else if (sgsr & SERDES_TG3_SPEED_100) {
5698 current_speed = SPEED_100;
5699 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5700 } else {
5701 current_speed = SPEED_10;
5702 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5703 }
5704
5705 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5706 current_duplex = DUPLEX_FULL;
5707 else
5708 current_duplex = DUPLEX_HALF;
5709 }
5710
5711 tw32_f(MAC_MODE, tp->mac_mode);
5712 udelay(40);
5713
5714 tg3_clear_mac_status(tp);
5715
5716 goto fiber_setup_done;
5717 }
5718
5719 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5720 tw32_f(MAC_MODE, tp->mac_mode);
5721 udelay(40);
5722
5723 tg3_clear_mac_status(tp);
5724
5725 if (force_reset)
5726 tg3_phy_reset(tp);
5727
5728 tp->link_config.rmt_adv = 0;
5729
5730 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5731 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5732 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5733 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5734 bmsr |= BMSR_LSTATUS;
5735 else
5736 bmsr &= ~BMSR_LSTATUS;
5737 }
5738
5739 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5740
5741 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5742 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5743 /* do nothing, just check for link up at the end */
5744 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5745 u32 adv, newadv;
5746
5747 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5748 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5749 ADVERTISE_1000XPAUSE |
5750 ADVERTISE_1000XPSE_ASYM |
5751 ADVERTISE_SLCT);
5752
5753 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5754 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5755
5756 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5757 tg3_writephy(tp, MII_ADVERTISE, newadv);
5758 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5759 tg3_writephy(tp, MII_BMCR, bmcr);
5760
5761 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5762 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5763 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5764
5765 return err;
5766 }
5767 } else {
5768 u32 new_bmcr;
5769
5770 bmcr &= ~BMCR_SPEED1000;
5771 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5772
5773 if (tp->link_config.duplex == DUPLEX_FULL)
5774 new_bmcr |= BMCR_FULLDPLX;
5775
5776 if (new_bmcr != bmcr) {
5777 /* BMCR_SPEED1000 is a reserved bit that needs
5778 * to be set on write.
5779 */
5780 new_bmcr |= BMCR_SPEED1000;
5781
5782 /* Force a linkdown */
5783 if (tp->link_up) {
5784 u32 adv;
5785
5786 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5787 adv &= ~(ADVERTISE_1000XFULL |
5788 ADVERTISE_1000XHALF |
5789 ADVERTISE_SLCT);
5790 tg3_writephy(tp, MII_ADVERTISE, adv);
5791 tg3_writephy(tp, MII_BMCR, bmcr |
5792 BMCR_ANRESTART |
5793 BMCR_ANENABLE);
5794 udelay(10);
5795 tg3_carrier_off(tp);
5796 }
5797 tg3_writephy(tp, MII_BMCR, new_bmcr);
5798 bmcr = new_bmcr;
5799 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5800 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5801 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5802 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5803 bmsr |= BMSR_LSTATUS;
5804 else
5805 bmsr &= ~BMSR_LSTATUS;
5806 }
5807 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5808 }
5809 }
5810
5811 if (bmsr & BMSR_LSTATUS) {
5812 current_speed = SPEED_1000;
5813 current_link_up = true;
5814 if (bmcr & BMCR_FULLDPLX)
5815 current_duplex = DUPLEX_FULL;
5816 else
5817 current_duplex = DUPLEX_HALF;
5818
5819 local_adv = 0;
5820 remote_adv = 0;
5821
5822 if (bmcr & BMCR_ANENABLE) {
5823 u32 common;
5824
5825 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5826 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5827 common = local_adv & remote_adv;
5828 if (common & (ADVERTISE_1000XHALF |
5829 ADVERTISE_1000XFULL)) {
5830 if (common & ADVERTISE_1000XFULL)
5831 current_duplex = DUPLEX_FULL;
5832 else
5833 current_duplex = DUPLEX_HALF;
5834
5835 tp->link_config.rmt_adv =
5836 mii_adv_to_ethtool_adv_x(remote_adv);
5837 } else if (!tg3_flag(tp, 5780_CLASS)) {
5838 /* Link is up via parallel detect */
5839 } else {
5840 current_link_up = false;
5841 }
5842 }
5843 }
5844
5845 fiber_setup_done:
5846 if (current_link_up && current_duplex == DUPLEX_FULL)
5847 tg3_setup_flow_control(tp, local_adv, remote_adv);
5848
5849 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5850 if (tp->link_config.active_duplex == DUPLEX_HALF)
5851 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5852
5853 tw32_f(MAC_MODE, tp->mac_mode);
5854 udelay(40);
5855
5856 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5857
5858 tp->link_config.active_speed = current_speed;
5859 tp->link_config.active_duplex = current_duplex;
5860
5861 tg3_test_and_report_link_chg(tp, current_link_up);
5862 return err;
5863 }
5864
5865 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5866 {
5867 if (tp->serdes_counter) {
5868 /* Give autoneg time to complete. */
5869 tp->serdes_counter--;
5870 return;
5871 }
5872
5873 if (!tp->link_up &&
5874 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5875 u32 bmcr;
5876
5877 tg3_readphy(tp, MII_BMCR, &bmcr);
5878 if (bmcr & BMCR_ANENABLE) {
5879 u32 phy1, phy2;
5880
5881 /* Select shadow register 0x1f */
5882 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5883 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5884
5885 /* Select expansion interrupt status register */
5886 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5887 MII_TG3_DSP_EXP1_INT_STAT);
5888 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5889 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5890
5891 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5892 /* We have signal detect and not receiving
5893 * config code words, link is up by parallel
5894 * detection.
5895 */
5896
5897 bmcr &= ~BMCR_ANENABLE;
5898 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5899 tg3_writephy(tp, MII_BMCR, bmcr);
5900 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5901 }
5902 }
5903 } else if (tp->link_up &&
5904 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5905 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5906 u32 phy2;
5907
5908 /* Select expansion interrupt status register */
5909 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5910 MII_TG3_DSP_EXP1_INT_STAT);
5911 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5912 if (phy2 & 0x20) {
5913 u32 bmcr;
5914
5915 /* Config code words received, turn on autoneg. */
5916 tg3_readphy(tp, MII_BMCR, &bmcr);
5917 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5918
5919 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5920
5921 }
5922 }
5923 }
5924
5925 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
5926 {
5927 u32 val;
5928 int err;
5929
5930 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5931 err = tg3_setup_fiber_phy(tp, force_reset);
5932 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5933 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5934 else
5935 err = tg3_setup_copper_phy(tp, force_reset);
5936
5937 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
5938 u32 scale;
5939
5940 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5941 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5942 scale = 65;
5943 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5944 scale = 6;
5945 else
5946 scale = 12;
5947
5948 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5949 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5950 tw32(GRC_MISC_CFG, val);
5951 }
5952
5953 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5954 (6 << TX_LENGTHS_IPG_SHIFT);
5955 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
5956 tg3_asic_rev(tp) == ASIC_REV_5762)
5957 val |= tr32(MAC_TX_LENGTHS) &
5958 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5959 TX_LENGTHS_CNT_DWN_VAL_MSK);
5960
5961 if (tp->link_config.active_speed == SPEED_1000 &&
5962 tp->link_config.active_duplex == DUPLEX_HALF)
5963 tw32(MAC_TX_LENGTHS, val |
5964 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5965 else
5966 tw32(MAC_TX_LENGTHS, val |
5967 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5968
5969 if (!tg3_flag(tp, 5705_PLUS)) {
5970 if (tp->link_up) {
5971 tw32(HOSTCC_STAT_COAL_TICKS,
5972 tp->coal.stats_block_coalesce_usecs);
5973 } else {
5974 tw32(HOSTCC_STAT_COAL_TICKS, 0);
5975 }
5976 }
5977
5978 if (tg3_flag(tp, ASPM_WORKAROUND)) {
5979 val = tr32(PCIE_PWR_MGMT_THRESH);
5980 if (!tp->link_up)
5981 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5982 tp->pwrmgmt_thresh;
5983 else
5984 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5985 tw32(PCIE_PWR_MGMT_THRESH, val);
5986 }
5987
5988 return err;
5989 }
5990
5991 /* tp->lock must be held */
5992 static u64 tg3_refclk_read(struct tg3 *tp)
5993 {
5994 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
5995 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
5996 }
5997
5998 /* tp->lock must be held */
5999 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6000 {
6001 tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
6002 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6003 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6004 tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
6005 }
6006
6007 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6008 static inline void tg3_full_unlock(struct tg3 *tp);
6009 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6010 {
6011 struct tg3 *tp = netdev_priv(dev);
6012
6013 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6014 SOF_TIMESTAMPING_RX_SOFTWARE |
6015 SOF_TIMESTAMPING_SOFTWARE;
6016
6017 if (tg3_flag(tp, PTP_CAPABLE)) {
6018 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6019 SOF_TIMESTAMPING_RX_HARDWARE |
6020 SOF_TIMESTAMPING_RAW_HARDWARE;
6021 }
6022
6023 if (tp->ptp_clock)
6024 info->phc_index = ptp_clock_index(tp->ptp_clock);
6025 else
6026 info->phc_index = -1;
6027
6028 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6029
6030 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6031 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6032 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6033 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6034 return 0;
6035 }
6036
6037 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6038 {
6039 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6040 bool neg_adj = false;
6041 u32 correction = 0;
6042
6043 if (ppb < 0) {
6044 neg_adj = true;
6045 ppb = -ppb;
6046 }
6047
6048 /* Frequency adjustment is performed by hardware using a 24-bit
6049 * accumulator and a programmable correction value. On each clock
6050 * cycle, the correction value is added to the accumulator and when
6051 * it overflows, the time counter is incremented/decremented.
6052 *
6053 * So the conversion from ppb to the correction value is
6054 * correction = ppb * (1 << 24) / 1000000000
6055 */
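/* Worked example of the conversion above (hypothetical input): ppb =
 * 100000, i.e. a +100 ppm adjustment, gives
 * correction = 100000 * 16777216 / 1000000000 = 1677 after truncation.
 */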
6056 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6057 TG3_EAV_REF_CLK_CORRECT_MASK;
6058
6059 tg3_full_lock(tp, 0);
6060
6061 if (correction)
6062 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6063 TG3_EAV_REF_CLK_CORRECT_EN |
6064 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6065 else
6066 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6067
6068 tg3_full_unlock(tp);
6069
6070 return 0;
6071 }
6072
6073 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6074 {
6075 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6076
6077 tg3_full_lock(tp, 0);
6078 tp->ptp_adjust += delta;
6079 tg3_full_unlock(tp);
6080
6081 return 0;
6082 }
6083
6084 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
6085 {
6086 u64 ns;
6087 u32 remainder;
6088 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6089
6090 tg3_full_lock(tp, 0);
6091 ns = tg3_refclk_read(tp);
6092 ns += tp->ptp_adjust;
6093 tg3_full_unlock(tp);
6094
6095 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
6096 ts->tv_nsec = remainder;
6097
6098 return 0;
6099 }
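/* Example of the div_u64_rem() split above (hypothetical counter
 * value): ns = 1500000123 yields tv_sec = 1 and tv_nsec = 500000123.
 */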
6100
6101 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6102 const struct timespec *ts)
6103 {
6104 u64 ns;
6105 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6106
6107 ns = timespec_to_ns(ts);
6108
6109 tg3_full_lock(tp, 0);
6110 tg3_refclk_write(tp, ns);
6111 tp->ptp_adjust = 0;
6112 tg3_full_unlock(tp);
6113
6114 return 0;
6115 }
6116
6117 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6118 struct ptp_clock_request *rq, int on)
6119 {
6120 return -EOPNOTSUPP;
6121 }
6122
6123 static const struct ptp_clock_info tg3_ptp_caps = {
6124 .owner = THIS_MODULE,
6125 .name = "tg3 clock",
6126 .max_adj = 250000000,
6127 .n_alarm = 0,
6128 .n_ext_ts = 0,
6129 .n_per_out = 0,
6130 .pps = 0,
6131 .adjfreq = tg3_ptp_adjfreq,
6132 .adjtime = tg3_ptp_adjtime,
6133 .gettime = tg3_ptp_gettime,
6134 .settime = tg3_ptp_settime,
6135 .enable = tg3_ptp_enable,
6136 };
6137
6138 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6139 struct skb_shared_hwtstamps *timestamp)
6140 {
6141 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6142 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6143 tp->ptp_adjust);
6144 }
6145
6146 /* tp->lock must be held */
6147 static void tg3_ptp_init(struct tg3 *tp)
6148 {
6149 if (!tg3_flag(tp, PTP_CAPABLE))
6150 return;
6151
6152 /* Initialize the hardware clock to the system time. */
6153 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6154 tp->ptp_adjust = 0;
6155 tp->ptp_info = tg3_ptp_caps;
6156 }
6157
6158 /* tp->lock must be held */
6159 static void tg3_ptp_resume(struct tg3 *tp)
6160 {
6161 if (!tg3_flag(tp, PTP_CAPABLE))
6162 return;
6163
6164 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6165 tp->ptp_adjust = 0;
6166 }
6167
6168 static void tg3_ptp_fini(struct tg3 *tp)
6169 {
6170 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6171 return;
6172
6173 ptp_clock_unregister(tp->ptp_clock);
6174 tp->ptp_clock = NULL;
6175 tp->ptp_adjust = 0;
6176 }
6177
6178 static inline int tg3_irq_sync(struct tg3 *tp)
6179 {
6180 return tp->irq_sync;
6181 }
6182
6183 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6184 {
6185 int i;
6186
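/* Mirror the register offset into the destination buffer so that
 * each block of registers lands at its own offset within the dump.
 */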
6187 dst = (u32 *)((u8 *)dst + off);
6188 for (i = 0; i < len; i += sizeof(u32))
6189 *dst++ = tr32(off + i);
6190 }
6191
6192 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6193 {
6194 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6195 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6196 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6197 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6198 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6199 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6200 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6201 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6202 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6203 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6204 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6205 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6206 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6207 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6208 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6209 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6210 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6211 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6212 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6213
6214 if (tg3_flag(tp, SUPPORT_MSIX))
6215 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6216
6217 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6218 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6219 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6220 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6221 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6222 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6223 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6224 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6225
6226 if (!tg3_flag(tp, 5705_PLUS)) {
6227 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6228 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6229 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6230 }
6231
6232 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6233 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6234 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6235 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6236 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6237
6238 if (tg3_flag(tp, NVRAM))
6239 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6240 }
6241
6242 static void tg3_dump_state(struct tg3 *tp)
6243 {
6244 int i;
6245 u32 *regs;
6246
6247 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6248 if (!regs)
6249 return;
6250
6251 if (tg3_flag(tp, PCI_EXPRESS)) {
6252 /* Read up to but not including private PCI registers */
6253 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6254 regs[i / sizeof(u32)] = tr32(i);
6255 } else
6256 tg3_dump_legacy_regs(tp, regs);
6257
6258 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6259 if (!regs[i + 0] && !regs[i + 1] &&
6260 !regs[i + 2] && !regs[i + 3])
6261 continue;
6262
6263 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6264 i * 4,
6265 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6266 }
6267
6268 kfree(regs);
6269
6270 for (i = 0; i < tp->irq_cnt; i++) {
6271 struct tg3_napi *tnapi = &tp->napi[i];
6272
6273 /* SW status block */
6274 netdev_err(tp->dev,
6275 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6276 i,
6277 tnapi->hw_status->status,
6278 tnapi->hw_status->status_tag,
6279 tnapi->hw_status->rx_jumbo_consumer,
6280 tnapi->hw_status->rx_consumer,
6281 tnapi->hw_status->rx_mini_consumer,
6282 tnapi->hw_status->idx[0].rx_producer,
6283 tnapi->hw_status->idx[0].tx_consumer);
6284
6285 netdev_err(tp->dev,
6286 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6287 i,
6288 tnapi->last_tag, tnapi->last_irq_tag,
6289 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6290 tnapi->rx_rcb_ptr,
6291 tnapi->prodring.rx_std_prod_idx,
6292 tnapi->prodring.rx_std_cons_idx,
6293 tnapi->prodring.rx_jmb_prod_idx,
6294 tnapi->prodring.rx_jmb_cons_idx);
6295 }
6296 }
6297
6298 /* This is called whenever we suspect that the system chipset is re-
6299 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6300 * is bogus tx completions. We try to recover by setting the
6301 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6302 * in the workqueue.
6303 */
6304 static void tg3_tx_recover(struct tg3 *tp)
6305 {
6306 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6307 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6308
6309 netdev_warn(tp->dev,
6310 "The system may be re-ordering memory-mapped I/O "
6311 "cycles to the network device, attempting to recover. "
6312 "Please report the problem to the driver maintainer "
6313 "and include system chipset information.\n");
6314
6315 spin_lock(&tp->lock);
6316 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6317 spin_unlock(&tp->lock);
6318 }
6319
6320 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6321 {
6322 /* Tell compiler to fetch tx indices from memory. */
6323 barrier();
6324 return tnapi->tx_pending -
6325 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6326 }
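/* Worked example for tg3_tx_avail() (hypothetical state, assuming
 * TG3_TX_RING_SIZE = 512): with tx_pending = 511, tx_prod = 5 and
 * tx_cons = 510, (5 - 510) & 511 = 7 descriptors are in flight,
 * leaving 511 - 7 = 504 free slots. The mask makes the unsigned
 * subtraction wrap correctly because the ring size is a power of two.
 */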
6327
6328 /* Tigon3 never reports partial packet sends. So we do not
6329 * need special logic to handle SKBs that have not had all
6330 * of their frags sent yet, like SunGEM does.
6331 */
6332 static void tg3_tx(struct tg3_napi *tnapi)
6333 {
6334 struct tg3 *tp = tnapi->tp;
6335 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6336 u32 sw_idx = tnapi->tx_cons;
6337 struct netdev_queue *txq;
6338 int index = tnapi - tp->napi;
6339 unsigned int pkts_compl = 0, bytes_compl = 0;
6340
6341 if (tg3_flag(tp, ENABLE_TSS))
6342 index--;
6343
6344 txq = netdev_get_tx_queue(tp->dev, index);
6345
6346 while (sw_idx != hw_idx) {
6347 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6348 struct sk_buff *skb = ri->skb;
6349 int i, tx_bug = 0;
6350
6351 if (unlikely(skb == NULL)) {
6352 tg3_tx_recover(tp);
6353 return;
6354 }
6355
6356 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6357 struct skb_shared_hwtstamps timestamp;
6358 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6359 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6360
6361 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6362
6363 skb_tstamp_tx(skb, &timestamp);
6364 }
6365
6366 pci_unmap_single(tp->pdev,
6367 dma_unmap_addr(ri, mapping),
6368 skb_headlen(skb),
6369 PCI_DMA_TODEVICE);
6370
6371 ri->skb = NULL;
6372
6373 while (ri->fragmented) {
6374 ri->fragmented = false;
6375 sw_idx = NEXT_TX(sw_idx);
6376 ri = &tnapi->tx_buffers[sw_idx];
6377 }
6378
6379 sw_idx = NEXT_TX(sw_idx);
6380
6381 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6382 ri = &tnapi->tx_buffers[sw_idx];
6383 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6384 tx_bug = 1;
6385
6386 pci_unmap_page(tp->pdev,
6387 dma_unmap_addr(ri, mapping),
6388 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6389 PCI_DMA_TODEVICE);
6390
6391 while (ri->fragmented) {
6392 ri->fragmented = false;
6393 sw_idx = NEXT_TX(sw_idx);
6394 ri = &tnapi->tx_buffers[sw_idx];
6395 }
6396
6397 sw_idx = NEXT_TX(sw_idx);
6398 }
6399
6400 pkts_compl++;
6401 bytes_compl += skb->len;
6402
6403 dev_kfree_skb(skb);
6404
6405 if (unlikely(tx_bug)) {
6406 tg3_tx_recover(tp);
6407 return;
6408 }
6409 }
6410
6411 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6412
6413 tnapi->tx_cons = sw_idx;
6414
6415 /* Need to make the tx_cons update visible to tg3_start_xmit()
6416 * before checking for netif_queue_stopped(). Without the
6417 * memory barrier, there is a small possibility that tg3_start_xmit()
6418 * will miss it and cause the queue to be stopped forever.
6419 */
6420 smp_mb();
6421
6422 if (unlikely(netif_tx_queue_stopped(txq) &&
6423 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6424 __netif_tx_lock(txq, smp_processor_id());
6425 if (netif_tx_queue_stopped(txq) &&
6426 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6427 netif_tx_wake_queue(txq);
6428 __netif_tx_unlock(txq);
6429 }
6430 }
6431
6432 static void tg3_frag_free(bool is_frag, void *data)
6433 {
6434 if (is_frag)
6435 put_page(virt_to_head_page(data));
6436 else
6437 kfree(data);
6438 }
6439
6440 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6441 {
6442 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6443 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6444
6445 if (!ri->data)
6446 return;
6447
6448 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6449 map_sz, PCI_DMA_FROMDEVICE);
6450 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6451 ri->data = NULL;
6452 }
6453
6454
6455 /* Returns size of skb allocated or < 0 on error.
6456 *
6457 * We only need to fill in the address because the other members
6458 * of the RX descriptor are invariant; see tg3_init_rings.
6459 *
6460 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6461 * posting buffers we only dirty the first cache line of the RX
6462 * descriptor (containing the address). Whereas for the RX status
6463 * buffers the cpu only reads the last cacheline of the RX descriptor
6464 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6465 */
6466 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6467 u32 opaque_key, u32 dest_idx_unmasked,
6468 unsigned int *frag_size)
6469 {
6470 struct tg3_rx_buffer_desc *desc;
6471 struct ring_info *map;
6472 u8 *data;
6473 dma_addr_t mapping;
6474 int skb_size, data_size, dest_idx;
6475
6476 switch (opaque_key) {
6477 case RXD_OPAQUE_RING_STD:
6478 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6479 desc = &tpr->rx_std[dest_idx];
6480 map = &tpr->rx_std_buffers[dest_idx];
6481 data_size = tp->rx_pkt_map_sz;
6482 break;
6483
6484 case RXD_OPAQUE_RING_JUMBO:
6485 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6486 desc = &tpr->rx_jmb[dest_idx].std;
6487 map = &tpr->rx_jmb_buffers[dest_idx];
6488 data_size = TG3_RX_JMB_MAP_SZ;
6489 break;
6490
6491 default:
6492 return -EINVAL;
6493 }
6494
6495 /* Do not overwrite any of the map or rp information
6496 * until we are sure we can commit to a new buffer.
6497 *
6498 * Callers depend upon this behavior and assume that
6499 * we leave everything unchanged if we fail.
6500 */
6501 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6502 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6503 if (skb_size <= PAGE_SIZE) {
6504 data = netdev_alloc_frag(skb_size);
6505 *frag_size = skb_size;
6506 } else {
6507 data = kmalloc(skb_size, GFP_ATOMIC);
6508 *frag_size = 0;
6509 }
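/* Buffers that fit within a page come from the page-frag allocator
 * so that build_skb() can later wrap them cheaply; anything larger
 * (e.g. jumbo frames) falls back to kmalloc(), which the caller
 * detects via *frag_size == 0.
 */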
6510 if (!data)
6511 return -ENOMEM;
6512
6513 mapping = pci_map_single(tp->pdev,
6514 data + TG3_RX_OFFSET(tp),
6515 data_size,
6516 PCI_DMA_FROMDEVICE);
6517 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6518 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6519 return -EIO;
6520 }
6521
6522 map->data = data;
6523 dma_unmap_addr_set(map, mapping, mapping);
6524
6525 desc->addr_hi = ((u64)mapping >> 32);
6526 desc->addr_lo = ((u64)mapping & 0xffffffff);
6527
6528 return data_size;
6529 }
6530
6531 /* We only need to copy over the address because the other
6532 * members of the RX descriptor are invariant. See notes above
6533 * tg3_alloc_rx_data for full details.
6534 */
6535 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6536 struct tg3_rx_prodring_set *dpr,
6537 u32 opaque_key, int src_idx,
6538 u32 dest_idx_unmasked)
6539 {
6540 struct tg3 *tp = tnapi->tp;
6541 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6542 struct ring_info *src_map, *dest_map;
6543 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6544 int dest_idx;
6545
6546 switch (opaque_key) {
6547 case RXD_OPAQUE_RING_STD:
6548 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6549 dest_desc = &dpr->rx_std[dest_idx];
6550 dest_map = &dpr->rx_std_buffers[dest_idx];
6551 src_desc = &spr->rx_std[src_idx];
6552 src_map = &spr->rx_std_buffers[src_idx];
6553 break;
6554
6555 case RXD_OPAQUE_RING_JUMBO:
6556 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6557 dest_desc = &dpr->rx_jmb[dest_idx].std;
6558 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6559 src_desc = &spr->rx_jmb[src_idx].std;
6560 src_map = &spr->rx_jmb_buffers[src_idx];
6561 break;
6562
6563 default:
6564 return;
6565 }
6566
6567 dest_map->data = src_map->data;
6568 dma_unmap_addr_set(dest_map, mapping,
6569 dma_unmap_addr(src_map, mapping));
6570 dest_desc->addr_hi = src_desc->addr_hi;
6571 dest_desc->addr_lo = src_desc->addr_lo;
6572
6573 /* Ensure that the update to the skb happens after the physical
6574 * addresses have been transferred to the new BD location.
6575 */
6576 smp_wmb();
6577
6578 src_map->data = NULL;
6579 }
6580
6581 /* The RX ring scheme is composed of multiple rings which post fresh
6582 * buffers to the chip, and one special ring the chip uses to report
6583 * status back to the host.
6584 *
6585 * The special ring reports the status of received packets to the
6586 * host. The chip does not write into the original descriptor the
6587 * RX buffer was obtained from. The chip simply takes the original
6588 * descriptor as provided by the host, updates the status and length
6589 * field, then writes this into the next status ring entry.
6590 *
6591 * Each ring the host uses to post buffers to the chip is described
6592 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
6593 * it is first placed into the on-chip ram. When the packet's length
6594 * is known, it walks down the TG3_BDINFO entries to select the ring.
6595 * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
6596 * whose MAXLEN covers the new packet's length is chosen.
6597 *
6598 * The "separate ring for rx status" scheme may sound queer, but it makes
6599 * sense from a cache coherency perspective. If only the host writes
6600 * to the buffer post rings, and only the chip writes to the rx status
6601 * rings, then cache lines never move beyond shared-modified state.
6602 * If both the host and chip were to write into the same ring, cache line
6603 * eviction could occur since both entities want it in an exclusive state.
6604 */
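/* A rough sketch of the flow described above:
 *
 *   host --(std/jumbo producer rings)--> chip : fresh empty buffers
 *   chip --(return status ring)--------> host : completed packets
 *
 * Each ring has a single writer, so no cache line is written by both
 * the host and the chip.
 */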
6605 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6606 {
6607 struct tg3 *tp = tnapi->tp;
6608 u32 work_mask, rx_std_posted = 0;
6609 u32 std_prod_idx, jmb_prod_idx;
6610 u32 sw_idx = tnapi->rx_rcb_ptr;
6611 u16 hw_idx;
6612 int received;
6613 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6614
6615 hw_idx = *(tnapi->rx_rcb_prod_idx);
6616 /*
6617 * We need to order the read of hw_idx and the read of
6618 * the opaque cookie.
6619 */
6620 rmb();
6621 work_mask = 0;
6622 received = 0;
6623 std_prod_idx = tpr->rx_std_prod_idx;
6624 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6625 while (sw_idx != hw_idx && budget > 0) {
6626 struct ring_info *ri;
6627 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6628 unsigned int len;
6629 struct sk_buff *skb;
6630 dma_addr_t dma_addr;
6631 u32 opaque_key, desc_idx, *post_ptr;
6632 u8 *data;
6633 u64 tstamp = 0;
6634
6635 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6636 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6637 if (opaque_key == RXD_OPAQUE_RING_STD) {
6638 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6639 dma_addr = dma_unmap_addr(ri, mapping);
6640 data = ri->data;
6641 post_ptr = &std_prod_idx;
6642 rx_std_posted++;
6643 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6644 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6645 dma_addr = dma_unmap_addr(ri, mapping);
6646 data = ri->data;
6647 post_ptr = &jmb_prod_idx;
6648 } else
6649 goto next_pkt_nopost;
6650
6651 work_mask |= opaque_key;
6652
6653 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6654 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6655 drop_it:
6656 tg3_recycle_rx(tnapi, tpr, opaque_key,
6657 desc_idx, *post_ptr);
6658 drop_it_no_recycle:
6659 /* Other statistics are tracked by the card. */
6660 tp->rx_dropped++;
6661 goto next_pkt;
6662 }
6663
6664 prefetch(data + TG3_RX_OFFSET(tp));
6665 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6666 ETH_FCS_LEN;
6667
6668 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6669 RXD_FLAG_PTPSTAT_PTPV1 ||
6670 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6671 RXD_FLAG_PTPSTAT_PTPV2) {
6672 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6673 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6674 }
6675
6676 if (len > TG3_RX_COPY_THRESH(tp)) {
6677 int skb_size;
6678 unsigned int frag_size;
6679
6680 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6681 *post_ptr, &frag_size);
6682 if (skb_size < 0)
6683 goto drop_it;
6684
6685 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6686 PCI_DMA_FROMDEVICE);
6687
6688 skb = build_skb(data, frag_size);
6689 if (!skb) {
6690 tg3_frag_free(frag_size != 0, data);
6691 goto drop_it_no_recycle;
6692 }
6693 skb_reserve(skb, TG3_RX_OFFSET(tp));
6694 /* Ensure that the update to the data happens
6695 * after the usage of the old DMA mapping.
6696 */
6697 smp_wmb();
6698
6699 ri->data = NULL;
6700
6701 } else {
6702 tg3_recycle_rx(tnapi, tpr, opaque_key,
6703 desc_idx, *post_ptr);
6704
6705 skb = netdev_alloc_skb(tp->dev,
6706 len + TG3_RAW_IP_ALIGN);
6707 if (skb == NULL)
6708 goto drop_it_no_recycle;
6709
6710 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6711 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6712 memcpy(skb->data,
6713 data + TG3_RX_OFFSET(tp),
6714 len);
6715 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6716 }
6717
6718 skb_put(skb, len);
6719 if (tstamp)
6720 tg3_hwclock_to_timestamp(tp, tstamp,
6721 skb_hwtstamps(skb));
6722
6723 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6724 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6725 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6726 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6727 skb->ip_summed = CHECKSUM_UNNECESSARY;
6728 else
6729 skb_checksum_none_assert(skb);
6730
6731 skb->protocol = eth_type_trans(skb, tp->dev);
6732
6733 if (len > (tp->dev->mtu + ETH_HLEN) &&
6734 skb->protocol != htons(ETH_P_8021Q)) {
6735 dev_kfree_skb(skb);
6736 goto drop_it_no_recycle;
6737 }
6738
6739 if (desc->type_flags & RXD_FLAG_VLAN &&
6740 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6741 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6742 desc->err_vlan & RXD_VLAN_MASK);
6743
6744 napi_gro_receive(&tnapi->napi, skb);
6745
6746 received++;
6747 budget--;
6748
6749 next_pkt:
6750 (*post_ptr)++;
6751
6752 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6753 tpr->rx_std_prod_idx = std_prod_idx &
6754 tp->rx_std_ring_mask;
6755 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6756 tpr->rx_std_prod_idx);
6757 work_mask &= ~RXD_OPAQUE_RING_STD;
6758 rx_std_posted = 0;
6759 }
6760 next_pkt_nopost:
6761 sw_idx++;
6762 sw_idx &= tp->rx_ret_ring_mask;
6763
6764 /* Refresh hw_idx to see if there is new work */
6765 if (sw_idx == hw_idx) {
6766 hw_idx = *(tnapi->rx_rcb_prod_idx);
6767 rmb();
6768 }
6769 }
6770
6771 /* ACK the status ring. */
6772 tnapi->rx_rcb_ptr = sw_idx;
6773 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6774
6775 /* Refill RX ring(s). */
6776 if (!tg3_flag(tp, ENABLE_RSS)) {
6777 /* Sync BD data before updating mailbox */
6778 wmb();
6779
6780 if (work_mask & RXD_OPAQUE_RING_STD) {
6781 tpr->rx_std_prod_idx = std_prod_idx &
6782 tp->rx_std_ring_mask;
6783 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6784 tpr->rx_std_prod_idx);
6785 }
6786 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6787 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6788 tp->rx_jmb_ring_mask;
6789 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6790 tpr->rx_jmb_prod_idx);
6791 }
6792 mmiowb();
6793 } else if (work_mask) {
6794 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6795 * updated before the producer indices can be updated.
6796 */
6797 smp_wmb();
6798
6799 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6800 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6801
6802 if (tnapi != &tp->napi[1]) {
6803 tp->rx_refill = true;
6804 napi_schedule(&tp->napi[1].napi);
6805 }
6806 }
6807
6808 return received;
6809 }
6810
6811 static void tg3_poll_link(struct tg3 *tp)
6812 {
6813 /* handle link change and other phy events */
6814 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6815 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6816
6817 if (sblk->status & SD_STATUS_LINK_CHG) {
6818 sblk->status = SD_STATUS_UPDATED |
6819 (sblk->status & ~SD_STATUS_LINK_CHG);
6820 spin_lock(&tp->lock);
6821 if (tg3_flag(tp, USE_PHYLIB)) {
6822 tw32_f(MAC_STATUS,
6823 (MAC_STATUS_SYNC_CHANGED |
6824 MAC_STATUS_CFG_CHANGED |
6825 MAC_STATUS_MI_COMPLETION |
6826 MAC_STATUS_LNKSTATE_CHANGED));
6827 udelay(40);
6828 } else
6829 tg3_setup_phy(tp, false);
6830 spin_unlock(&tp->lock);
6831 }
6832 }
6833 }
6834
6835 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6836 struct tg3_rx_prodring_set *dpr,
6837 struct tg3_rx_prodring_set *spr)
6838 {
6839 u32 si, di, cpycnt, src_prod_idx;
6840 int i, err = 0;
6841
6842 while (1) {
6843 src_prod_idx = spr->rx_std_prod_idx;
6844
6845 /* Make sure updates to the rx_std_buffers[] entries and the
6846 * standard producer index are seen in the correct order.
6847 */
6848 smp_rmb();
6849
6850 if (spr->rx_std_cons_idx == src_prod_idx)
6851 break;
6852
6853 if (spr->rx_std_cons_idx < src_prod_idx)
6854 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6855 else
6856 cpycnt = tp->rx_std_ring_mask + 1 -
6857 spr->rx_std_cons_idx;
6858
6859 cpycnt = min(cpycnt,
6860 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
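/* Hypothetical example with a 512-entry ring: if cons_idx = 500 and
 * prod_idx = 10, the producer has wrapped, so only the 12 entries up
 * to the end of the ring are copied now; the next pass of the loop
 * picks up the remaining 10.
 */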
6861
6862 si = spr->rx_std_cons_idx;
6863 di = dpr->rx_std_prod_idx;
6864
6865 for (i = di; i < di + cpycnt; i++) {
6866 if (dpr->rx_std_buffers[i].data) {
6867 cpycnt = i - di;
6868 err = -ENOSPC;
6869 break;
6870 }
6871 }
6872
6873 if (!cpycnt)
6874 break;
6875
6876 /* Ensure that updates to the rx_std_buffers ring and the
6877 * shadowed hardware producer ring from tg3_recycle_skb() are
6878 * ordered correctly WRT the skb check above.
6879 */
6880 smp_rmb();
6881
6882 memcpy(&dpr->rx_std_buffers[di],
6883 &spr->rx_std_buffers[si],
6884 cpycnt * sizeof(struct ring_info));
6885
6886 for (i = 0; i < cpycnt; i++, di++, si++) {
6887 struct tg3_rx_buffer_desc *sbd, *dbd;
6888 sbd = &spr->rx_std[si];
6889 dbd = &dpr->rx_std[di];
6890 dbd->addr_hi = sbd->addr_hi;
6891 dbd->addr_lo = sbd->addr_lo;
6892 }
6893
6894 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6895 tp->rx_std_ring_mask;
6896 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6897 tp->rx_std_ring_mask;
6898 }
6899
6900 while (1) {
6901 src_prod_idx = spr->rx_jmb_prod_idx;
6902
6903 /* Make sure updates to the rx_jmb_buffers[] entries and
6904 * the jumbo producer index are seen in the correct order.
6905 */
6906 smp_rmb();
6907
6908 if (spr->rx_jmb_cons_idx == src_prod_idx)
6909 break;
6910
6911 if (spr->rx_jmb_cons_idx < src_prod_idx)
6912 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6913 else
6914 cpycnt = tp->rx_jmb_ring_mask + 1 -
6915 spr->rx_jmb_cons_idx;
6916
6917 cpycnt = min(cpycnt,
6918 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6919
6920 si = spr->rx_jmb_cons_idx;
6921 di = dpr->rx_jmb_prod_idx;
6922
6923 for (i = di; i < di + cpycnt; i++) {
6924 if (dpr->rx_jmb_buffers[i].data) {
6925 cpycnt = i - di;
6926 err = -ENOSPC;
6927 break;
6928 }
6929 }
6930
6931 if (!cpycnt)
6932 break;
6933
6934 /* Ensure that updates to the rx_jmb_buffers ring and the
6935 * shadowed hardware producer ring from tg3_recycle_skb() are
6936 * ordered correctly WRT the skb check above.
6937 */
6938 smp_rmb();
6939
6940 memcpy(&dpr->rx_jmb_buffers[di],
6941 &spr->rx_jmb_buffers[si],
6942 cpycnt * sizeof(struct ring_info));
6943
6944 for (i = 0; i < cpycnt; i++, di++, si++) {
6945 struct tg3_rx_buffer_desc *sbd, *dbd;
6946 sbd = &spr->rx_jmb[si].std;
6947 dbd = &dpr->rx_jmb[di].std;
6948 dbd->addr_hi = sbd->addr_hi;
6949 dbd->addr_lo = sbd->addr_lo;
6950 }
6951
6952 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6953 tp->rx_jmb_ring_mask;
6954 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6955 tp->rx_jmb_ring_mask;
6956 }
6957
6958 return err;
6959 }
6960
6961 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6962 {
6963 struct tg3 *tp = tnapi->tp;
6964
6965 /* run TX completion thread */
6966 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6967 tg3_tx(tnapi);
6968 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6969 return work_done;
6970 }
6971
6972 if (!tnapi->rx_rcb_prod_idx)
6973 return work_done;
6974
6975 /* run RX thread, within the bounds set by NAPI.
6976 * All RX "locking" is done by ensuring outside
6977 * code synchronizes with tg3->napi.poll()
6978 */
6979 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6980 work_done += tg3_rx(tnapi, budget - work_done);
6981
6982 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6983 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6984 int i, err = 0;
6985 u32 std_prod_idx = dpr->rx_std_prod_idx;
6986 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6987
6988 tp->rx_refill = false;
6989 for (i = 1; i <= tp->rxq_cnt; i++)
6990 err |= tg3_rx_prodring_xfer(tp, dpr,
6991 &tp->napi[i].prodring);
6992
6993 wmb();
6994
6995 if (std_prod_idx != dpr->rx_std_prod_idx)
6996 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6997 dpr->rx_std_prod_idx);
6998
6999 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7000 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7001 dpr->rx_jmb_prod_idx);
7002
7003 mmiowb();
7004
7005 if (err)
7006 tw32_f(HOSTCC_MODE, tp->coal_now);
7007 }
7008
7009 return work_done;
7010 }
7011
7012 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7013 {
7014 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7015 schedule_work(&tp->reset_task);
7016 }
7017
7018 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7019 {
7020 cancel_work_sync(&tp->reset_task);
7021 tg3_flag_clear(tp, RESET_TASK_PENDING);
7022 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7023 }
7024
7025 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7026 {
7027 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7028 struct tg3 *tp = tnapi->tp;
7029 int work_done = 0;
7030 struct tg3_hw_status *sblk = tnapi->hw_status;
7031
7032 while (1) {
7033 work_done = tg3_poll_work(tnapi, work_done, budget);
7034
7035 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7036 goto tx_recovery;
7037
7038 if (unlikely(work_done >= budget))
7039 break;
7040
7041 /* tp->last_tag is used in tg3_int_reenable() below
7042 * to tell the hw how much work has been processed,
7043 * so we must read it before checking for more work.
7044 */
7045 tnapi->last_tag = sblk->status_tag;
7046 tnapi->last_irq_tag = tnapi->last_tag;
7047 rmb();
7048
7049 /* check for RX/TX work to do */
7050 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7051 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7052
7053 /* This test here is not race-free, but it will reduce
7054 * the number of interrupts by looping again.
7055 */
7056 if (tnapi == &tp->napi[1] && tp->rx_refill)
7057 continue;
7058
7059 napi_complete(napi);
7060 /* Reenable interrupts. */
7061 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7062
7063 /* This test here is synchronized by napi_schedule()
7064 * and napi_complete() to close the race condition.
7065 */
7066 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7067 tw32(HOSTCC_MODE, tp->coalesce_mode |
7068 HOSTCC_MODE_ENABLE |
7069 tnapi->coal_now);
7070 }
7071 mmiowb();
7072 break;
7073 }
7074 }
7075
7076 return work_done;
7077
7078 tx_recovery:
7079 /* work_done is guaranteed to be less than budget. */
7080 napi_complete(napi);
7081 tg3_reset_task_schedule(tp);
7082 return work_done;
7083 }
7084
7085 static void tg3_process_error(struct tg3 *tp)
7086 {
7087 u32 val;
7088 bool real_error = false;
7089
7090 if (tg3_flag(tp, ERROR_PROCESSED))
7091 return;
7092
7093 /* Check Flow Attention register */
7094 val = tr32(HOSTCC_FLOW_ATTN);
7095 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7096 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7097 real_error = true;
7098 }
7099
7100 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7101 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7102 real_error = true;
7103 }
7104
7105 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7106 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7107 real_error = true;
7108 }
7109
7110 if (!real_error)
7111 return;
7112
7113 tg3_dump_state(tp);
7114
7115 tg3_flag_set(tp, ERROR_PROCESSED);
7116 tg3_reset_task_schedule(tp);
7117 }
7118
7119 static int tg3_poll(struct napi_struct *napi, int budget)
7120 {
7121 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7122 struct tg3 *tp = tnapi->tp;
7123 int work_done = 0;
7124 struct tg3_hw_status *sblk = tnapi->hw_status;
7125
7126 while (1) {
7127 if (sblk->status & SD_STATUS_ERROR)
7128 tg3_process_error(tp);
7129
7130 tg3_poll_link(tp);
7131
7132 work_done = tg3_poll_work(tnapi, work_done, budget);
7133
7134 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7135 goto tx_recovery;
7136
7137 if (unlikely(work_done >= budget))
7138 break;
7139
7140 if (tg3_flag(tp, TAGGED_STATUS)) {
7141 /* tp->last_tag is used in tg3_int_reenable() below
7142 * to tell the hw how much work has been processed,
7143 * so we must read it before checking for more work.
7144 */
7145 tnapi->last_tag = sblk->status_tag;
7146 tnapi->last_irq_tag = tnapi->last_tag;
7147 rmb();
7148 } else
7149 sblk->status &= ~SD_STATUS_UPDATED;
7150
7151 if (likely(!tg3_has_work(tnapi))) {
7152 napi_complete(napi);
7153 tg3_int_reenable(tnapi);
7154 break;
7155 }
7156 }
7157
7158 return work_done;
7159
7160 tx_recovery:
7161 /* work_done is guaranteed to be less than budget. */
7162 napi_complete(napi);
7163 tg3_reset_task_schedule(tp);
7164 return work_done;
7165 }
7166
7167 static void tg3_napi_disable(struct tg3 *tp)
7168 {
7169 int i;
7170
7171 for (i = tp->irq_cnt - 1; i >= 0; i--)
7172 napi_disable(&tp->napi[i].napi);
7173 }
7174
7175 static void tg3_napi_enable(struct tg3 *tp)
7176 {
7177 int i;
7178
7179 for (i = 0; i < tp->irq_cnt; i++)
7180 napi_enable(&tp->napi[i].napi);
7181 }
7182
7183 static void tg3_napi_init(struct tg3 *tp)
7184 {
7185 int i;
7186
7187 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7188 for (i = 1; i < tp->irq_cnt; i++)
7189 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7190 }
7191
7192 static void tg3_napi_fini(struct tg3 *tp)
7193 {
7194 int i;
7195
7196 for (i = 0; i < tp->irq_cnt; i++)
7197 netif_napi_del(&tp->napi[i].napi);
7198 }
7199
7200 static inline void tg3_netif_stop(struct tg3 *tp)
7201 {
7202 tp->dev->trans_start = jiffies; /* prevent tx timeout */
7203 tg3_napi_disable(tp);
7204 netif_carrier_off(tp->dev);
7205 netif_tx_disable(tp->dev);
7206 }
7207
7208 /* tp->lock must be held */
7209 static inline void tg3_netif_start(struct tg3 *tp)
7210 {
7211 tg3_ptp_resume(tp);
7212
7213 /* NOTE: unconditional netif_tx_wake_all_queues is only
7214 * appropriate so long as all callers are assured to
7215 * have free tx slots (such as after tg3_init_hw)
7216 */
7217 netif_tx_wake_all_queues(tp->dev);
7218
7219 if (tp->link_up)
7220 netif_carrier_on(tp->dev);
7221
7222 tg3_napi_enable(tp);
7223 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7224 tg3_enable_ints(tp);
7225 }
7226
7227 static void tg3_irq_quiesce(struct tg3 *tp)
7228 {
7229 int i;
7230
7231 BUG_ON(tp->irq_sync);
7232
7233 tp->irq_sync = 1;
7234 smp_mb();
7235
7236 for (i = 0; i < tp->irq_cnt; i++)
7237 synchronize_irq(tp->napi[i].irq_vec);
7238 }
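/* Once tg3_irq_quiesce() returns, every handler has either completed
 * or will observe tp->irq_sync and bail out via tg3_irq_sync(), so no
 * further NAPI work is scheduled from hardware interrupts.
 */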
7239
7240 /* Fully shut down all tg3 driver activity elsewhere in the system.
7241 * If irq_sync is non-zero, then the IRQ handlers must be synchronized
7242 * as well. Most of the time, this is not necessary except when
7243 * shutting down the device.
7244 */
7245 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7246 {
7247 spin_lock_bh(&tp->lock);
7248 if (irq_sync)
7249 tg3_irq_quiesce(tp);
7250 }
7251
7252 static inline void tg3_full_unlock(struct tg3 *tp)
7253 {
7254 spin_unlock_bh(&tp->lock);
7255 }
7256
7257 /* One-shot MSI handler - Chip automatically disables interrupt
7258 * after sending MSI so driver doesn't have to do it.
7259 */
7260 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7261 {
7262 struct tg3_napi *tnapi = dev_id;
7263 struct tg3 *tp = tnapi->tp;
7264
7265 prefetch(tnapi->hw_status);
7266 if (tnapi->rx_rcb)
7267 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7268
7269 if (likely(!tg3_irq_sync(tp)))
7270 napi_schedule(&tnapi->napi);
7271
7272 return IRQ_HANDLED;
7273 }
7274
7275 /* MSI ISR - No need to check for interrupt sharing and no need to
7276 * flush status block and interrupt mailbox. PCI ordering rules
7277 * guarantee that MSI will arrive after the status block.
7278 */
7279 static irqreturn_t tg3_msi(int irq, void *dev_id)
7280 {
7281 struct tg3_napi *tnapi = dev_id;
7282 struct tg3 *tp = tnapi->tp;
7283
7284 prefetch(tnapi->hw_status);
7285 if (tnapi->rx_rcb)
7286 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7287 /*
7288 * Writing any value to intr-mbox-0 clears PCI INTA# and
7289 * chip-internal interrupt pending events.
7290 * Writing non-zero to intr-mbox-0 additionally tells the
7291 * NIC to stop sending us irqs, engaging "in-intr-handler"
7292 * event coalescing.
7293 */
7294 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7295 if (likely(!tg3_irq_sync(tp)))
7296 napi_schedule(&tnapi->napi);
7297
7298 return IRQ_RETVAL(1);
7299 }
7300
7301 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7302 {
7303 struct tg3_napi *tnapi = dev_id;
7304 struct tg3 *tp = tnapi->tp;
7305 struct tg3_hw_status *sblk = tnapi->hw_status;
7306 unsigned int handled = 1;
7307
7308 /* In INTx mode, it is possible for the interrupt to arrive at the
7309 * CPU before the status block (posted prior to the interrupt) is visible.
7310 * Reading the PCI State register will confirm whether the
7311 * interrupt is ours and will flush the status block.
7312 */
7313 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7314 if (tg3_flag(tp, CHIP_RESETTING) ||
7315 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7316 handled = 0;
7317 goto out;
7318 }
7319 }
7320
7321 /*
7322 * Writing any value to intr-mbox-0 clears PCI INTA# and
7323 * chip-internal interrupt pending events.
7324 * Writing non-zero to intr-mbox-0 additionally tells the
7325 * NIC to stop sending us irqs, engaging "in-intr-handler"
7326 * event coalescing.
7327 *
7328 * Flush the mailbox to de-assert the IRQ immediately to prevent
7329 * spurious interrupts. The flush impacts performance but
7330 * excessive spurious interrupts can be worse in some cases.
7331 */
7332 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7333 if (tg3_irq_sync(tp))
7334 goto out;
7335 sblk->status &= ~SD_STATUS_UPDATED;
7336 if (likely(tg3_has_work(tnapi))) {
7337 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7338 napi_schedule(&tnapi->napi);
7339 } else {
7340 /* No work, shared interrupt perhaps? re-enable
7341 * interrupts, and flush that PCI write
7342 */
7343 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7344 0x00000000);
7345 }
7346 out:
7347 return IRQ_RETVAL(handled);
7348 }
7349
7350 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7351 {
7352 struct tg3_napi *tnapi = dev_id;
7353 struct tg3 *tp = tnapi->tp;
7354 struct tg3_hw_status *sblk = tnapi->hw_status;
7355 unsigned int handled = 1;
7356
7357 /* In INTx mode, it is possible for the interrupt to arrive at the
7358 * CPU before the status block (posted prior to the interrupt) is visible.
7359 * Reading the PCI State register will confirm whether the
7360 * interrupt is ours and will flush the status block.
7361 */
7362 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7363 if (tg3_flag(tp, CHIP_RESETTING) ||
7364 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7365 handled = 0;
7366 goto out;
7367 }
7368 }
7369
7370 /*
7371 * Writing any value to intr-mbox-0 clears PCI INTA# and
7372 * chip-internal interrupt pending events.
7373 * Writing non-zero to intr-mbox-0 additionally tells the
7374 * NIC to stop sending us irqs, engaging "in-intr-handler"
7375 * event coalescing.
7376 *
7377 * Flush the mailbox to de-assert the IRQ immediately to prevent
7378 * spurious interrupts. The flush impacts performance but
7379 * excessive spurious interrupts can be worse in some cases.
7380 */
7381 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7382
7383 /*
7384 * In a shared interrupt configuration, sometimes other devices'
7385 * interrupts will scream. We record the current status tag here
7386 * so that the above check can report that the screaming interrupts
7387 * are unhandled. Eventually they will be silenced.
7388 */
7389 tnapi->last_irq_tag = sblk->status_tag;
7390
7391 if (tg3_irq_sync(tp))
7392 goto out;
7393
7394 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7395
7396 napi_schedule(&tnapi->napi);
7397
7398 out:
7399 return IRQ_RETVAL(handled);
7400 }
7401
7402 /* ISR for interrupt test */
7403 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7404 {
7405 struct tg3_napi *tnapi = dev_id;
7406 struct tg3 *tp = tnapi->tp;
7407 struct tg3_hw_status *sblk = tnapi->hw_status;
7408
7409 if ((sblk->status & SD_STATUS_UPDATED) ||
7410 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7411 tg3_disable_ints(tp);
7412 return IRQ_RETVAL(1);
7413 }
7414 return IRQ_RETVAL(0);
7415 }
7416
7417 #ifdef CONFIG_NET_POLL_CONTROLLER
7418 static void tg3_poll_controller(struct net_device *dev)
7419 {
7420 int i;
7421 struct tg3 *tp = netdev_priv(dev);
7422
7423 if (tg3_irq_sync(tp))
7424 return;
7425
7426 for (i = 0; i < tp->irq_cnt; i++)
7427 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7428 }
7429 #endif
7430
7431 static void tg3_tx_timeout(struct net_device *dev)
7432 {
7433 struct tg3 *tp = netdev_priv(dev);
7434
7435 if (netif_msg_tx_err(tp)) {
7436 netdev_err(dev, "transmit timed out, resetting\n");
7437 tg3_dump_state(tp);
7438 }
7439
7440 tg3_reset_task_schedule(tp);
7441 }
7442
7443 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7444 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7445 {
7446 u32 base = (u32) mapping & 0xffffffff;
7447
7448 return (base > 0xffffdcc0) && (base + len + 8 < base);
7449 }
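/* Hypothetical example: base = 0xffffff00 with len = 0x200 gives
 * base + len + 8 = 0x108 after 32-bit truncation, which is smaller
 * than base, so the buffer crosses a 4GB boundary and the test above
 * returns true.
 */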
7450
7451 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7452 * of any 4GB boundaries: 4G, 8G, etc
7453 */
7454 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7455 u32 len, u32 mss)
7456 {
7457 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7458 u32 base = (u32) mapping & 0xffffffff;
7459
7460 return ((base + len + (mss & 0x3fff)) < base);
7461 }
7462 return 0;
7463 }
7464
7465 /* Test for DMA addresses > 40-bit */
7466 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7467 int len)
7468 {
7469 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7470 if (tg3_flag(tp, 40BIT_DMA_BUG))
7471 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7472 return 0;
7473 #else
7474 return 0;
7475 #endif
7476 }
7477
7478 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7479 dma_addr_t mapping, u32 len, u32 flags,
7480 u32 mss, u32 vlan)
7481 {
7482 txbd->addr_hi = ((u64) mapping >> 32);
7483 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7484 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7485 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7486 }
7487
7488 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7489 dma_addr_t map, u32 len, u32 flags,
7490 u32 mss, u32 vlan)
7491 {
7492 struct tg3 *tp = tnapi->tp;
7493 bool hwbug = false;
7494
7495 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7496 hwbug = true;
7497
7498 if (tg3_4g_overflow_test(map, len))
7499 hwbug = true;
7500
7501 if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7502 hwbug = true;
7503
7504 if (tg3_40bit_overflow_test(tp, map, len))
7505 hwbug = true;
7506
7507 if (tp->dma_limit) {
7508 u32 prvidx = *entry;
7509 u32 tmp_flag = flags & ~TXD_FLAG_END;
7510 while (len > tp->dma_limit && *budget) {
7511 u32 frag_len = tp->dma_limit;
7512 len -= tp->dma_limit;
7513
7514 /* Avoid the 8-byte short-DMA problem: split in half */
7515 if (len <= 8) {
7516 len += tp->dma_limit / 2;
7517 frag_len = tp->dma_limit / 2;
7518 }
7519
7520 tnapi->tx_buffers[*entry].fragmented = true;
7521
7522 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7523 frag_len, tmp_flag, mss, vlan);
7524 *budget -= 1;
7525 prvidx = *entry;
7526 *entry = NEXT_TX(*entry);
7527
7528 map += frag_len;
7529 }
7530
7531 if (len) {
7532 if (*budget) {
7533 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7534 len, flags, mss, vlan);
7535 *budget -= 1;
7536 *entry = NEXT_TX(*entry);
7537 } else {
7538 hwbug = true;
7539 tnapi->tx_buffers[prvidx].fragmented = false;
7540 }
7541 }
7542 } else {
7543 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7544 len, flags, mss, vlan);
7545 *entry = NEXT_TX(*entry);
7546 }
7547
7548 return hwbug;
7549 }
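/* Example of the dma_limit split above (hypothetical values): with
 * dma_limit = 4096 and len = 4100, a full-sized first chunk would
 * leave a 4-byte remainder and trip the short-DMA bug, so the loop
 * emits a 2048-byte BD and leaves 2052 bytes for the final BD.
 */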
7550
7551 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7552 {
7553 int i;
7554 struct sk_buff *skb;
7555 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7556
7557 skb = txb->skb;
7558 txb->skb = NULL;
7559
7560 pci_unmap_single(tnapi->tp->pdev,
7561 dma_unmap_addr(txb, mapping),
7562 skb_headlen(skb),
7563 PCI_DMA_TODEVICE);
7564
7565 while (txb->fragmented) {
7566 txb->fragmented = false;
7567 entry = NEXT_TX(entry);
7568 txb = &tnapi->tx_buffers[entry];
7569 }
7570
7571 for (i = 0; i <= last; i++) {
7572 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7573
7574 entry = NEXT_TX(entry);
7575 txb = &tnapi->tx_buffers[entry];
7576
7577 pci_unmap_page(tnapi->tp->pdev,
7578 dma_unmap_addr(txb, mapping),
7579 skb_frag_size(frag), PCI_DMA_TODEVICE);
7580
7581 while (txb->fragmented) {
7582 txb->fragmented = false;
7583 entry = NEXT_TX(entry);
7584 txb = &tnapi->tx_buffers[entry];
7585 }
7586 }
7587 }
7588
7589 /* Work around the 4GB and 40-bit hardware DMA bugs. */
7590 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7591 struct sk_buff **pskb,
7592 u32 *entry, u32 *budget,
7593 u32 base_flags, u32 mss, u32 vlan)
7594 {
7595 struct tg3 *tp = tnapi->tp;
7596 struct sk_buff *new_skb, *skb = *pskb;
7597 dma_addr_t new_addr = 0;
7598 int ret = 0;
7599
7600 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7601 new_skb = skb_copy(skb, GFP_ATOMIC);
7602 else {
7603 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7604
7605 new_skb = skb_copy_expand(skb,
7606 skb_headroom(skb) + more_headroom,
7607 skb_tailroom(skb), GFP_ATOMIC);
7608 }
7609
7610 if (!new_skb) {
7611 ret = -1;
7612 } else {
7613 /* New SKB is guaranteed to be linear. */
7614 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7615 PCI_DMA_TODEVICE);
7616 /* Make sure the mapping succeeded */
7617 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7618 dev_kfree_skb(new_skb);
7619 ret = -1;
7620 } else {
7621 u32 save_entry = *entry;
7622
7623 base_flags |= TXD_FLAG_END;
7624
7625 tnapi->tx_buffers[*entry].skb = new_skb;
7626 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7627 mapping, new_addr);
7628
7629 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7630 new_skb->len, base_flags,
7631 mss, vlan)) {
7632 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7633 dev_kfree_skb(new_skb);
7634 ret = -1;
7635 }
7636 }
7637 }
7638
7639 dev_kfree_skb(skb);
7640 *pskb = new_skb;
7641 return ret;
7642 }
7643
7644 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7645
7646 /* Use GSO to work around a rare TSO bug that may be triggered when the
7647 * TSO header is greater than 80 bytes.
7648 */
7649 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7650 {
7651 struct sk_buff *segs, *nskb;
7652 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7653
7654 /* Estimate the number of fragments in the worst case */
7655 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7656 netif_stop_queue(tp->dev);
7657
7658 /* netif_tx_stop_queue() must be done before checking
7659 * the tx index in tg3_tx_avail() below, because in
7660 * tg3_tx(), we update tx index before checking for
7661 * netif_tx_queue_stopped().
7662 */
7663 smp_mb();
7664 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7665 return NETDEV_TX_BUSY;
7666
7667 netif_wake_queue(tp->dev);
7668 }
7669
7670 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7671 if (IS_ERR(segs))
7672 goto tg3_tso_bug_end;
7673
7674 do {
7675 nskb = segs;
7676 segs = segs->next;
7677 nskb->next = NULL;
7678 tg3_start_xmit(nskb, tp->dev);
7679 } while (segs);
7680
7681 tg3_tso_bug_end:
7682 dev_kfree_skb(skb);
7683
7684 return NETDEV_TX_OK;
7685 }
7686
7687 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7688 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7689 */
7690 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7691 {
7692 struct tg3 *tp = netdev_priv(dev);
7693 u32 len, entry, base_flags, mss, vlan = 0;
7694 u32 budget;
7695 int i = -1, would_hit_hwbug;
7696 dma_addr_t mapping;
7697 struct tg3_napi *tnapi;
7698 struct netdev_queue *txq;
7699 unsigned int last;
7700
7701 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7702 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7703 if (tg3_flag(tp, ENABLE_TSS))
7704 tnapi++;
7705
7706 budget = tg3_tx_avail(tnapi);
7707
7708 /* We are running in BH-disabled context with netif_tx_lock
7709 * and TX reclaim runs via tp->napi.poll inside of a software
7710 * interrupt. Furthermore, IRQ processing runs lockless so we have
7711 * no IRQ context deadlocks to worry about either. Rejoice!
7712 */
7713 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7714 if (!netif_tx_queue_stopped(txq)) {
7715 netif_tx_stop_queue(txq);
7716
7717 /* This is a hard error, log it. */
7718 netdev_err(dev,
7719 "BUG! Tx Ring full when queue awake!\n");
7720 }
7721 return NETDEV_TX_BUSY;
7722 }
7723
7724 entry = tnapi->tx_prod;
7725 base_flags = 0;
7726 if (skb->ip_summed == CHECKSUM_PARTIAL)
7727 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7728
7729 mss = skb_shinfo(skb)->gso_size;
7730 if (mss) {
7731 struct iphdr *iph;
7732 u32 tcp_opt_len, hdr_len;
7733
7734 if (skb_header_cloned(skb) &&
7735 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7736 goto drop;
7737
7738 iph = ip_hdr(skb);
7739 tcp_opt_len = tcp_optlen(skb);
7740
7741 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7742
7743 if (!skb_is_gso_v6(skb)) {
7744 iph->check = 0;
7745 iph->tot_len = htons(mss + hdr_len);
7746 }
7747
7748 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7749 tg3_flag(tp, TSO_BUG))
7750 return tg3_tso_bug(tp, skb);
7751
7752 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7753 TXD_FLAG_CPU_POST_DMA);
7754
7755 if (tg3_flag(tp, HW_TSO_1) ||
7756 tg3_flag(tp, HW_TSO_2) ||
7757 tg3_flag(tp, HW_TSO_3)) {
7758 tcp_hdr(skb)->check = 0;
7759 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7760 } else
7761 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7762 iph->daddr, 0,
7763 IPPROTO_TCP,
7764 0);
7765
7766 if (tg3_flag(tp, HW_TSO_3)) {
7767 mss |= (hdr_len & 0xc) << 12;
7768 if (hdr_len & 0x10)
7769 base_flags |= 0x00000010;
7770 base_flags |= (hdr_len & 0x3e0) << 5;
7771 } else if (tg3_flag(tp, HW_TSO_2))
7772 mss |= hdr_len << 9;
7773 else if (tg3_flag(tp, HW_TSO_1) ||
7774 tg3_asic_rev(tp) == ASIC_REV_5705) {
7775 if (tcp_opt_len || iph->ihl > 5) {
7776 int tsflags;
7777
7778 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7779 mss |= (tsflags << 11);
7780 }
7781 } else {
7782 if (tcp_opt_len || iph->ihl > 5) {
7783 int tsflags;
7784
7785 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7786 base_flags |= tsflags << 12;
7787 }
7788 }
7789 }
7790
7791 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7792 !mss && skb->len > VLAN_ETH_FRAME_LEN)
7793 base_flags |= TXD_FLAG_JMB_PKT;
7794
7795 if (vlan_tx_tag_present(skb)) {
7796 base_flags |= TXD_FLAG_VLAN;
7797 vlan = vlan_tx_tag_get(skb);
7798 }
7799
7800 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7801 tg3_flag(tp, TX_TSTAMP_EN)) {
7802 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7803 base_flags |= TXD_FLAG_HWTSTAMP;
7804 }
7805
7806 len = skb_headlen(skb);
7807
7808 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7809 if (pci_dma_mapping_error(tp->pdev, mapping))
7810 goto drop;
7811
7812
7813 tnapi->tx_buffers[entry].skb = skb;
7814 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7815
7816 would_hit_hwbug = 0;
7817
7818 if (tg3_flag(tp, 5701_DMA_BUG))
7819 would_hit_hwbug = 1;
7820
7821 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7822 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7823 mss, vlan)) {
7824 would_hit_hwbug = 1;
7825 } else if (skb_shinfo(skb)->nr_frags > 0) {
7826 u32 tmp_mss = mss;
7827
7828 if (!tg3_flag(tp, HW_TSO_1) &&
7829 !tg3_flag(tp, HW_TSO_2) &&
7830 !tg3_flag(tp, HW_TSO_3))
7831 tmp_mss = 0;
7832
7833 /* Now loop through additional data
7834 * fragments, and queue them.
7835 */
7836 last = skb_shinfo(skb)->nr_frags - 1;
7837 for (i = 0; i <= last; i++) {
7838 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7839
7840 len = skb_frag_size(frag);
7841 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7842 len, DMA_TO_DEVICE);
7843
7844 tnapi->tx_buffers[entry].skb = NULL;
7845 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7846 mapping);
7847 if (dma_mapping_error(&tp->pdev->dev, mapping))
7848 goto dma_error;
7849
7850 if (!budget ||
7851 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7852 len, base_flags |
7853 ((i == last) ? TXD_FLAG_END : 0),
7854 tmp_mss, vlan)) {
7855 would_hit_hwbug = 1;
7856 break;
7857 }
7858 }
7859 }
7860
7861 if (would_hit_hwbug) {
7862 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7863
7864 /* If the workaround fails due to memory/mapping
7865 * failure, silently drop this packet.
7866 */
7867 entry = tnapi->tx_prod;
7868 budget = tg3_tx_avail(tnapi);
7869 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7870 base_flags, mss, vlan))
7871 goto drop_nofree;
7872 }
7873
7874 skb_tx_timestamp(skb);
7875 netdev_tx_sent_queue(txq, skb->len);
7876
7877 /* Sync BD data before updating mailbox */
7878 wmb();
7879
7880 /* Packets are ready, update Tx producer idx locally and on the card. */
7881 tw32_tx_mbox(tnapi->prodmbox, entry);
7882
7883 tnapi->tx_prod = entry;
7884 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7885 netif_tx_stop_queue(txq);
7886
7887 /* netif_tx_stop_queue() must be done before checking
7888 * the tx index in tg3_tx_avail() below, because in
7889 * tg3_tx(), we update tx index before checking for
7890 * netif_tx_queue_stopped().
7891 */
7892 smp_mb();
7893 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7894 netif_tx_wake_queue(txq);
7895 }
7896
7897 mmiowb();
7898 return NETDEV_TX_OK;
7899
7900 dma_error:
7901 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7902 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7903 drop:
7904 dev_kfree_skb(skb);
7905 drop_nofree:
7906 tp->tx_dropped++;
7907 return NETDEV_TX_OK;
7908 }
7909
7910 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7911 {
7912 if (enable) {
7913 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7914 MAC_MODE_PORT_MODE_MASK);
7915
7916 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7917
7918 if (!tg3_flag(tp, 5705_PLUS))
7919 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7920
7921 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7922 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7923 else
7924 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7925 } else {
7926 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7927
7928 if (tg3_flag(tp, 5705_PLUS) ||
7929 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7930 tg3_asic_rev(tp) == ASIC_REV_5700)
7931 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7932 }
7933
7934 tw32(MAC_MODE, tp->mac_mode);
7935 udelay(40);
7936 }
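/* Internal MAC loopback wraps the transmitter straight back to the
 * receiver inside the MAC, bypassing the PHY. It is used by the
 * ethtool self tests and by the NETIF_F_LOOPBACK feature handled in
 * tg3_set_loopback() below.
 */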
7937
7938 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7939 {
7940 u32 val, bmcr, mac_mode, ptest = 0;
7941
7942 tg3_phy_toggle_apd(tp, false);
7943 tg3_phy_toggle_automdix(tp, false);
7944
7945 if (extlpbk && tg3_phy_set_extloopbk(tp))
7946 return -EIO;
7947
7948 bmcr = BMCR_FULLDPLX;
7949 switch (speed) {
7950 case SPEED_10:
7951 break;
7952 case SPEED_100:
7953 bmcr |= BMCR_SPEED100;
7954 break;
7955 case SPEED_1000:
7956 default:
7957 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7958 speed = SPEED_100;
7959 bmcr |= BMCR_SPEED100;
7960 } else {
7961 speed = SPEED_1000;
7962 bmcr |= BMCR_SPEED1000;
7963 }
7964 }
7965
7966 if (extlpbk) {
7967 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7968 tg3_readphy(tp, MII_CTRL1000, &val);
7969 val |= CTL1000_AS_MASTER |
7970 CTL1000_ENABLE_MASTER;
7971 tg3_writephy(tp, MII_CTRL1000, val);
7972 } else {
7973 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7974 MII_TG3_FET_PTEST_TRIM_2;
7975 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7976 }
7977 } else
7978 bmcr |= BMCR_LOOPBACK;
7979
7980 tg3_writephy(tp, MII_BMCR, bmcr);
7981
7982 /* The write needs to be flushed for the FETs */
7983 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7984 tg3_readphy(tp, MII_BMCR, &bmcr);
7985
7986 udelay(40);
7987
7988 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7989 tg3_asic_rev(tp) == ASIC_REV_5785) {
7990 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7991 MII_TG3_FET_PTEST_FRC_TX_LINK |
7992 MII_TG3_FET_PTEST_FRC_TX_LOCK);
7993
7994 /* The write needs to be flushed for the AC131 */
7995 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7996 }
7997
7998 /* Reset to prevent losing 1st rx packet intermittently */
7999 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8000 tg3_flag(tp, 5780_CLASS)) {
8001 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8002 udelay(10);
8003 tw32_f(MAC_RX_MODE, tp->rx_mode);
8004 }
8005
8006 mac_mode = tp->mac_mode &
8007 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8008 if (speed == SPEED_1000)
8009 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8010 else
8011 mac_mode |= MAC_MODE_PORT_MODE_MII;
8012
8013 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8014 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8015
8016 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8017 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8018 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8019 mac_mode |= MAC_MODE_LINK_POLARITY;
8020
8021 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8022 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8023 }
8024
8025 tw32(MAC_MODE, mac_mode);
8026 udelay(40);
8027
8028 return 0;
8029 }
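/* PHY loopback, by contrast, also exercises the MAC/PHY interface.
 * BMCR_LOOPBACK wraps traffic inside the PHY, while the extlpbk
 * variant instead forces master/test modes so frames can traverse an
 * external loopback cable. On FET-style (10/100-only) PHYs a
 * requested 1000 Mb/s speed falls back to 100 Mb/s, as above.
 */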
8030
8031 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8032 {
8033 struct tg3 *tp = netdev_priv(dev);
8034
8035 if (features & NETIF_F_LOOPBACK) {
8036 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8037 return;
8038
8039 spin_lock_bh(&tp->lock);
8040 tg3_mac_loopback(tp, true);
8041 netif_carrier_on(tp->dev);
8042 spin_unlock_bh(&tp->lock);
8043 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8044 } else {
8045 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8046 return;
8047
8048 spin_lock_bh(&tp->lock);
8049 tg3_mac_loopback(tp, false);
8050 /* Force link status check */
8051 tg3_setup_phy(tp, true);
8052 spin_unlock_bh(&tp->lock);
8053 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8054 }
8055 }
8056
8057 static netdev_features_t tg3_fix_features(struct net_device *dev,
8058 netdev_features_t features)
8059 {
8060 struct tg3 *tp = netdev_priv(dev);
8061
8062 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8063 features &= ~NETIF_F_ALL_TSO;
8064
8065 return features;
8066 }
8067
8068 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8069 {
8070 netdev_features_t changed = dev->features ^ features;
8071
8072 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8073 tg3_set_loopback(dev, features);
8074
8075 return 0;
8076 }
8077
8078 static void tg3_rx_prodring_free(struct tg3 *tp,
8079 struct tg3_rx_prodring_set *tpr)
8080 {
8081 int i;
8082
8083 if (tpr != &tp->napi[0].prodring) {
8084 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8085 i = (i + 1) & tp->rx_std_ring_mask)
8086 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8087 tp->rx_pkt_map_sz);
8088
8089 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8090 for (i = tpr->rx_jmb_cons_idx;
8091 i != tpr->rx_jmb_prod_idx;
8092 i = (i + 1) & tp->rx_jmb_ring_mask) {
8093 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8094 TG3_RX_JMB_MAP_SZ);
8095 }
8096 }
8097
8098 return;
8099 }
8100
8101 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8102 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8103 tp->rx_pkt_map_sz);
8104
8105 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8106 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8107 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8108 TG3_RX_JMB_MAP_SZ);
8109 }
8110 }
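/* Note the asymmetry above: for the per-vector rings (tpr !=
 * &tp->napi[0].prodring) only the consumer..producer window can hold
 * posted buffers, so only that range is walked; ring 0 owns the full
 * buffer arrays and is swept from index 0 through the ring mask.
 */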
8111
8112 /* Initialize rx rings for packet processing.
8113 *
8114 * The chip has been shut down and the driver detached from
8115 * the network stack, so no interrupts or new tx packets will
8116 * end up in the driver. tp->{tx,}lock are held and thus
8117 * we may not sleep.
8118 */
8119 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8120 struct tg3_rx_prodring_set *tpr)
8121 {
8122 u32 i, rx_pkt_dma_sz;
8123
8124 tpr->rx_std_cons_idx = 0;
8125 tpr->rx_std_prod_idx = 0;
8126 tpr->rx_jmb_cons_idx = 0;
8127 tpr->rx_jmb_prod_idx = 0;
8128
8129 if (tpr != &tp->napi[0].prodring) {
8130 memset(&tpr->rx_std_buffers[0], 0,
8131 TG3_RX_STD_BUFF_RING_SIZE(tp));
8132 if (tpr->rx_jmb_buffers)
8133 memset(&tpr->rx_jmb_buffers[0], 0,
8134 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8135 goto done;
8136 }
8137
8138 /* Zero out all descriptors. */
8139 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8140
8141 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8142 if (tg3_flag(tp, 5780_CLASS) &&
8143 tp->dev->mtu > ETH_DATA_LEN)
8144 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8145 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8146
8147 /* Initialize invariants of the rings; we only set this
8148 * stuff once. This works because the card does not
8149 * write into the rx buffer posting rings.
8150 */
8151 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8152 struct tg3_rx_buffer_desc *rxd;
8153
8154 rxd = &tpr->rx_std[i];
8155 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8156 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8157 rxd->opaque = (RXD_OPAQUE_RING_STD |
8158 (i << RXD_OPAQUE_INDEX_SHIFT));
8159 }
8160
8161 /* Now allocate fresh SKBs for each rx ring. */
8162 for (i = 0; i < tp->rx_pending; i++) {
8163 unsigned int frag_size;
8164
8165 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8166 &frag_size) < 0) {
8167 netdev_warn(tp->dev,
8168 "Using a smaller RX standard ring. Only "
8169 "%d out of %d buffers were allocated "
8170 "successfully\n", i, tp->rx_pending);
8171 if (i == 0)
8172 goto initfail;
8173 tp->rx_pending = i;
8174 break;
8175 }
8176 }
8177
8178 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8179 goto done;
8180
8181 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8182
8183 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8184 goto done;
8185
8186 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8187 struct tg3_rx_buffer_desc *rxd;
8188
8189 rxd = &tpr->rx_jmb[i].std;
8190 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8191 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8192 RXD_FLAG_JUMBO;
8193 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8194 (i << RXD_OPAQUE_INDEX_SHIFT));
8195 }
8196
8197 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8198 unsigned int frag_size;
8199
8200 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8201 &frag_size) < 0) {
8202 netdev_warn(tp->dev,
8203 "Using a smaller RX jumbo ring. Only %d "
8204 "out of %d buffers were allocated "
8205 "successfully\n", i, tp->rx_jumbo_pending);
8206 if (i == 0)
8207 goto initfail;
8208 tp->rx_jumbo_pending = i;
8209 break;
8210 }
8211 }
8212
8213 done:
8214 return 0;
8215
8216 initfail:
8217 tg3_rx_prodring_free(tp, tpr);
8218 return -ENOMEM;
8219 }
8220
8221 static void tg3_rx_prodring_fini(struct tg3 *tp,
8222 struct tg3_rx_prodring_set *tpr)
8223 {
8224 kfree(tpr->rx_std_buffers);
8225 tpr->rx_std_buffers = NULL;
8226 kfree(tpr->rx_jmb_buffers);
8227 tpr->rx_jmb_buffers = NULL;
8228 if (tpr->rx_std) {
8229 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8230 tpr->rx_std, tpr->rx_std_mapping);
8231 tpr->rx_std = NULL;
8232 }
8233 if (tpr->rx_jmb) {
8234 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8235 tpr->rx_jmb, tpr->rx_jmb_mapping);
8236 tpr->rx_jmb = NULL;
8237 }
8238 }
8239
8240 static int tg3_rx_prodring_init(struct tg3 *tp,
8241 struct tg3_rx_prodring_set *tpr)
8242 {
8243 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8244 GFP_KERNEL);
8245 if (!tpr->rx_std_buffers)
8246 return -ENOMEM;
8247
8248 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8249 TG3_RX_STD_RING_BYTES(tp),
8250 &tpr->rx_std_mapping,
8251 GFP_KERNEL);
8252 if (!tpr->rx_std)
8253 goto err_out;
8254
8255 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8256 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8257 GFP_KERNEL);
8258 if (!tpr->rx_jmb_buffers)
8259 goto err_out;
8260
8261 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8262 TG3_RX_JMB_RING_BYTES(tp),
8263 &tpr->rx_jmb_mapping,
8264 GFP_KERNEL);
8265 if (!tpr->rx_jmb)
8266 goto err_out;
8267 }
8268
8269 return 0;
8270
8271 err_out:
8272 tg3_rx_prodring_fini(tp, tpr);
8273 return -ENOMEM;
8274 }
8275
8276 /* Free up pending packets in all rx/tx rings.
8277 *
8278 * The chip has been shut down and the driver detached from
8279 * the network stack, so no interrupts or new tx packets will
8280 * end up in the driver. tp->{tx,}lock is not held and we are not
8281 * in an interrupt context and thus may sleep.
8282 */
8283 static void tg3_free_rings(struct tg3 *tp)
8284 {
8285 int i, j;
8286
8287 for (j = 0; j < tp->irq_cnt; j++) {
8288 struct tg3_napi *tnapi = &tp->napi[j];
8289
8290 tg3_rx_prodring_free(tp, &tnapi->prodring);
8291
8292 if (!tnapi->tx_buffers)
8293 continue;
8294
8295 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8296 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8297
8298 if (!skb)
8299 continue;
8300
8301 tg3_tx_skb_unmap(tnapi, i,
8302 skb_shinfo(skb)->nr_frags - 1);
8303
8304 dev_kfree_skb_any(skb);
8305 }
8306 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8307 }
8308 }
8309
8310 /* Initialize tx/rx rings for packet processing.
8311 *
8312 * The chip has been shut down and the driver detached from
8313 * the network stack, so no interrupts or new tx packets will
8314 * end up in the driver. tp->{tx,}lock are held and thus
8315 * we may not sleep.
8316 */
8317 static int tg3_init_rings(struct tg3 *tp)
8318 {
8319 int i;
8320
8321 /* Free up all the SKBs. */
8322 tg3_free_rings(tp);
8323
8324 for (i = 0; i < tp->irq_cnt; i++) {
8325 struct tg3_napi *tnapi = &tp->napi[i];
8326
8327 tnapi->last_tag = 0;
8328 tnapi->last_irq_tag = 0;
8329 tnapi->hw_status->status = 0;
8330 tnapi->hw_status->status_tag = 0;
8331 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8332
8333 tnapi->tx_prod = 0;
8334 tnapi->tx_cons = 0;
8335 if (tnapi->tx_ring)
8336 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8337
8338 tnapi->rx_rcb_ptr = 0;
8339 if (tnapi->rx_rcb)
8340 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8341
8342 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8343 tg3_free_rings(tp);
8344 return -ENOMEM;
8345 }
8346 }
8347
8348 return 0;
8349 }
8350
8351 static void tg3_mem_tx_release(struct tg3 *tp)
8352 {
8353 int i;
8354
8355 for (i = 0; i < tp->irq_max; i++) {
8356 struct tg3_napi *tnapi = &tp->napi[i];
8357
8358 if (tnapi->tx_ring) {
8359 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8360 tnapi->tx_ring, tnapi->tx_desc_mapping);
8361 tnapi->tx_ring = NULL;
8362 }
8363
8364 kfree(tnapi->tx_buffers);
8365 tnapi->tx_buffers = NULL;
8366 }
8367 }
8368
8369 static int tg3_mem_tx_acquire(struct tg3 *tp)
8370 {
8371 int i;
8372 struct tg3_napi *tnapi = &tp->napi[0];
8373
8374 /* If multivector TSS is enabled, vector 0 does not handle
8375 * tx interrupts. Don't allocate any resources for it.
8376 */
8377 if (tg3_flag(tp, ENABLE_TSS))
8378 tnapi++;
8379
8380 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8381 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8382 TG3_TX_RING_SIZE, GFP_KERNEL);
8383 if (!tnapi->tx_buffers)
8384 goto err_out;
8385
8386 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8387 TG3_TX_RING_BYTES,
8388 &tnapi->tx_desc_mapping,
8389 GFP_KERNEL);
8390 if (!tnapi->tx_ring)
8391 goto err_out;
8392 }
8393
8394 return 0;
8395
8396 err_out:
8397 tg3_mem_tx_release(tp);
8398 return -ENOMEM;
8399 }
8400
8401 static void tg3_mem_rx_release(struct tg3 *tp)
8402 {
8403 int i;
8404
8405 for (i = 0; i < tp->irq_max; i++) {
8406 struct tg3_napi *tnapi = &tp->napi[i];
8407
8408 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8409
8410 if (!tnapi->rx_rcb)
8411 continue;
8412
8413 dma_free_coherent(&tp->pdev->dev,
8414 TG3_RX_RCB_RING_BYTES(tp),
8415 tnapi->rx_rcb,
8416 tnapi->rx_rcb_mapping);
8417 tnapi->rx_rcb = NULL;
8418 }
8419 }
8420
8421 static int tg3_mem_rx_acquire(struct tg3 *tp)
8422 {
8423 unsigned int i, limit;
8424
8425 limit = tp->rxq_cnt;
8426
8427 /* If RSS is enabled, we need a (dummy) producer ring
8428 * set on vector zero. This is the true hw prodring.
8429 */
8430 if (tg3_flag(tp, ENABLE_RSS))
8431 limit++;
8432
8433 for (i = 0; i < limit; i++) {
8434 struct tg3_napi *tnapi = &tp->napi[i];
8435
8436 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8437 goto err_out;
8438
8439 /* If multivector RSS is enabled, vector 0
8440 * does not handle rx or tx interrupts.
8441 * Don't allocate any resources for it.
8442 */
8443 if (!i && tg3_flag(tp, ENABLE_RSS))
8444 continue;
8445
8446 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8447 TG3_RX_RCB_RING_BYTES(tp),
8448 &tnapi->rx_rcb_mapping,
8449 GFP_KERNEL | __GFP_ZERO);
8450 if (!tnapi->rx_rcb)
8451 goto err_out;
8452 }
8453
8454 return 0;
8455
8456 err_out:
8457 tg3_mem_rx_release(tp);
8458 return -ENOMEM;
8459 }
8460
8461 /*
8462 * Must not be invoked with interrupt sources disabled and
8463 * the hardware shut down.
8464 */
8465 static void tg3_free_consistent(struct tg3 *tp)
8466 {
8467 int i;
8468
8469 for (i = 0; i < tp->irq_cnt; i++) {
8470 struct tg3_napi *tnapi = &tp->napi[i];
8471
8472 if (tnapi->hw_status) {
8473 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8474 tnapi->hw_status,
8475 tnapi->status_mapping);
8476 tnapi->hw_status = NULL;
8477 }
8478 }
8479
8480 tg3_mem_rx_release(tp);
8481 tg3_mem_tx_release(tp);
8482
8483 if (tp->hw_stats) {
8484 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8485 tp->hw_stats, tp->stats_mapping);
8486 tp->hw_stats = NULL;
8487 }
8488 }
8489
8490 /*
8491 * Must not be invoked with interrupt sources disabled and
8492 * the hardware shut down. Can sleep.
8493 */
8494 static int tg3_alloc_consistent(struct tg3 *tp)
8495 {
8496 int i;
8497
8498 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8499 sizeof(struct tg3_hw_stats),
8500 &tp->stats_mapping,
8501 GFP_KERNEL | __GFP_ZERO);
8502 if (!tp->hw_stats)
8503 goto err_out;
8504
8505 for (i = 0; i < tp->irq_cnt; i++) {
8506 struct tg3_napi *tnapi = &tp->napi[i];
8507 struct tg3_hw_status *sblk;
8508
8509 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8510 TG3_HW_STATUS_SIZE,
8511 &tnapi->status_mapping,
8512 GFP_KERNEL | __GFP_ZERO);
8513 if (!tnapi->hw_status)
8514 goto err_out;
8515
8516 sblk = tnapi->hw_status;
8517
8518 if (tg3_flag(tp, ENABLE_RSS)) {
8519 u16 *prodptr = NULL;
8520
8521 /*
8522 * When RSS is enabled, the status block format changes
8523 * slightly. The "rx_jumbo_consumer", "reserved",
8524 * and "rx_mini_consumer" members get mapped to the
8525 * other three rx return ring producer indexes.
8526 */
8527 switch (i) {
8528 case 1:
8529 prodptr = &sblk->idx[0].rx_producer;
8530 break;
8531 case 2:
8532 prodptr = &sblk->rx_jumbo_consumer;
8533 break;
8534 case 3:
8535 prodptr = &sblk->reserved;
8536 break;
8537 case 4:
8538 prodptr = &sblk->rx_mini_consumer;
8539 break;
8540 }
8541 tnapi->rx_rcb_prod_idx = prodptr;
8542 } else {
8543 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8544 }
8545 }
8546
8547 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8548 goto err_out;
8549
8550 return 0;
8551
8552 err_out:
8553 tg3_free_consistent(tp);
8554 return -ENOMEM;
8555 }
8556
8557 #define MAX_WAIT_CNT 1000
8558
8559 /* To stop a block, clear the enable bit and poll till it
8560 * clears. tp->lock is held.
8561 */
8562 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8563 {
8564 unsigned int i;
8565 u32 val;
8566
8567 if (tg3_flag(tp, 5705_PLUS)) {
8568 switch (ofs) {
8569 case RCVLSC_MODE:
8570 case DMAC_MODE:
8571 case MBFREE_MODE:
8572 case BUFMGR_MODE:
8573 case MEMARB_MODE:
8574 /* We can't enable/disable these bits of the
8575 * 5705/5750, just say success.
8576 */
8577 return 0;
8578
8579 default:
8580 break;
8581 }
8582 }
8583
8584 val = tr32(ofs);
8585 val &= ~enable_bit;
8586 tw32_f(ofs, val);
8587
8588 for (i = 0; i < MAX_WAIT_CNT; i++) {
8589 udelay(100);
8590 val = tr32(ofs);
8591 if ((val & enable_bit) == 0)
8592 break;
8593 }
8594
8595 if (i == MAX_WAIT_CNT && !silent) {
8596 dev_err(&tp->pdev->dev,
8597 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8598 ofs, enable_bit);
8599 return -ENODEV;
8600 }
8601
8602 return 0;
8603 }
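/* Worst case, the loop above polls MAX_WAIT_CNT (1000) times with a
 * 100 usec delay between reads, i.e. roughly a 100 msec timeout per
 * block.
 */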
8604
8605 /* tp->lock is held. */
8606 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8607 {
8608 int i, err;
8609
8610 tg3_disable_ints(tp);
8611
8612 tp->rx_mode &= ~RX_MODE_ENABLE;
8613 tw32_f(MAC_RX_MODE, tp->rx_mode);
8614 udelay(10);
8615
8616 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8617 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8618 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8619 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8620 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8621 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8622
8623 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8624 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8625 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8626 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8627 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8628 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8629 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8630
8631 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8632 tw32_f(MAC_MODE, tp->mac_mode);
8633 udelay(40);
8634
8635 tp->tx_mode &= ~TX_MODE_ENABLE;
8636 tw32_f(MAC_TX_MODE, tp->tx_mode);
8637
8638 for (i = 0; i < MAX_WAIT_CNT; i++) {
8639 udelay(100);
8640 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8641 break;
8642 }
8643 if (i >= MAX_WAIT_CNT) {
8644 dev_err(&tp->pdev->dev,
8645 "%s timed out, TX_MODE_ENABLE will not clear "
8646 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8647 err |= -ENODEV;
8648 }
8649
8650 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8651 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8652 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8653
8654 tw32(FTQ_RESET, 0xffffffff);
8655 tw32(FTQ_RESET, 0x00000000);
8656
8657 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8658 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8659
8660 for (i = 0; i < tp->irq_cnt; i++) {
8661 struct tg3_napi *tnapi = &tp->napi[i];
8662 if (tnapi->hw_status)
8663 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8664 }
8665
8666 return err;
8667 }
8668
8669 /* Save PCI command register before chip reset */
8670 static void tg3_save_pci_state(struct tg3 *tp)
8671 {
8672 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8673 }
8674
8675 /* Restore PCI state after chip reset */
8676 static void tg3_restore_pci_state(struct tg3 *tp)
8677 {
8678 u32 val;
8679
8680 /* Re-enable indirect register accesses. */
8681 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8682 tp->misc_host_ctrl);
8683
8684 /* Set MAX PCI retry to zero. */
8685 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8686 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8687 tg3_flag(tp, PCIX_MODE))
8688 val |= PCISTATE_RETRY_SAME_DMA;
8689 /* Allow reads and writes to the APE register and memory space. */
8690 if (tg3_flag(tp, ENABLE_APE))
8691 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8692 PCISTATE_ALLOW_APE_SHMEM_WR |
8693 PCISTATE_ALLOW_APE_PSPACE_WR;
8694 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8695
8696 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8697
8698 if (!tg3_flag(tp, PCI_EXPRESS)) {
8699 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8700 tp->pci_cacheline_sz);
8701 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8702 tp->pci_lat_timer);
8703 }
8704
8705 /* Make sure PCI-X relaxed ordering bit is clear. */
8706 if (tg3_flag(tp, PCIX_MODE)) {
8707 u16 pcix_cmd;
8708
8709 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8710 &pcix_cmd);
8711 pcix_cmd &= ~PCI_X_CMD_ERO;
8712 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8713 pcix_cmd);
8714 }
8715
8716 if (tg3_flag(tp, 5780_CLASS)) {
8717
8718 /* Chip reset on 5780 will reset MSI enable bit,
8719 * so we need to restore it.
8720 */
8721 if (tg3_flag(tp, USING_MSI)) {
8722 u16 ctrl;
8723
8724 pci_read_config_word(tp->pdev,
8725 tp->msi_cap + PCI_MSI_FLAGS,
8726 &ctrl);
8727 pci_write_config_word(tp->pdev,
8728 tp->msi_cap + PCI_MSI_FLAGS,
8729 ctrl | PCI_MSI_FLAGS_ENABLE);
8730 val = tr32(MSGINT_MODE);
8731 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8732 }
8733 }
8734 }
8735
8736 /* tp->lock is held. */
8737 static int tg3_chip_reset(struct tg3 *tp)
8738 {
8739 u32 val;
8740 void (*write_op)(struct tg3 *, u32, u32);
8741 int i, err;
8742
8743 tg3_nvram_lock(tp);
8744
8745 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8746
8747 /* No matching tg3_nvram_unlock() after this because
8748 * chip reset below will undo the nvram lock.
8749 */
8750 tp->nvram_lock_cnt = 0;
8751
8752 /* GRC_MISC_CFG core clock reset will clear the memory
8753 * enable bit in PCI register 4 and the MSI enable bit
8754 * on some chips, so we save relevant registers here.
8755 */
8756 tg3_save_pci_state(tp);
8757
8758 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8759 tg3_flag(tp, 5755_PLUS))
8760 tw32(GRC_FASTBOOT_PC, 0);
8761
8762 /*
8763 * We must avoid the readl() that normally takes place.
8764 * It locks machines, causes machine checks, and other
8765 * fun things. So, temporarily disable the 5701
8766 * hardware workaround, while we do the reset.
8767 */
8768 write_op = tp->write32;
8769 if (write_op == tg3_write_flush_reg32)
8770 tp->write32 = tg3_write32;
8771
8772 /* Prevent the irq handler from reading or writing PCI registers
8773 * during chip reset when the memory enable bit in the PCI command
8774 * register may be cleared. The chip does not generate interrupt
8775 * at this time, but the irq handler may still be called due to irq
8776 * sharing or irqpoll.
8777 */
8778 tg3_flag_set(tp, CHIP_RESETTING);
8779 for (i = 0; i < tp->irq_cnt; i++) {
8780 struct tg3_napi *tnapi = &tp->napi[i];
8781 if (tnapi->hw_status) {
8782 tnapi->hw_status->status = 0;
8783 tnapi->hw_status->status_tag = 0;
8784 }
8785 tnapi->last_tag = 0;
8786 tnapi->last_irq_tag = 0;
8787 }
8788 smp_mb();
8789
8790 for (i = 0; i < tp->irq_cnt; i++)
8791 synchronize_irq(tp->napi[i].irq_vec);
8792
8793 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
8794 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8795 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8796 }
8797
8798 /* do the reset */
8799 val = GRC_MISC_CFG_CORECLK_RESET;
8800
8801 if (tg3_flag(tp, PCI_EXPRESS)) {
8802 /* Force PCIe 1.0a mode */
8803 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
8804 !tg3_flag(tp, 57765_PLUS) &&
8805 tr32(TG3_PCIE_PHY_TSTCTL) ==
8806 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8807 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8808
8809 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
8810 tw32(GRC_MISC_CFG, (1 << 29));
8811 val |= (1 << 29);
8812 }
8813 }
8814
8815 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
8816 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8817 tw32(GRC_VCPU_EXT_CTRL,
8818 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8819 }
8820
8821 /* Manage gphy power for all PCIe devices that lack a CPMU. */
8822 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8823 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8824
8825 tw32(GRC_MISC_CFG, val);
8826
8827 /* restore 5701 hardware bug workaround write method */
8828 tp->write32 = write_op;
8829
8830 /* Unfortunately, we have to delay before the PCI read back.
8831 * Some 575X chips will not even respond to a PCI cfg access
8832 * when the reset command is given to the chip.
8833 *
8834 * How do these hardware designers expect things to work
8835 * properly if the PCI write is posted for a long period
8836 * of time? It is always necessary to have some method by
8837 * which a register read back can occur to push the write
8838 * out which does the reset.
8839 *
8840 * For most tg3 variants the trick below was working.
8841 * Ho hum...
8842 */
8843 udelay(120);
8844
8845 /* Flush PCI posted writes. The normal MMIO registers
8846 * are inaccessible at this time so this is the only
8847 * way to do this reliably (actually, this is no longer
8848 * the case, see above). I tried to use indirect
8849 * register read/write but this upset some 5701 variants.
8850 */
8851 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8852
8853 udelay(120);
8854
8855 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8856 u16 val16;
8857
8858 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
8859 int j;
8860 u32 cfg_val;
8861
8862 /* Wait for link training to complete. */
8863 for (j = 0; j < 5000; j++)
8864 udelay(100);
8865
8866 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8867 pci_write_config_dword(tp->pdev, 0xc4,
8868 cfg_val | (1 << 15));
8869 }
8870
8871 /* Clear the "no snoop" and "relaxed ordering" bits. */
8872 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8873 /*
8874 * Older PCIe devices only support the 128 byte
8875 * MPS setting. Enforce the restriction.
8876 */
8877 if (!tg3_flag(tp, CPMU_PRESENT))
8878 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8879 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8880
8881 /* Clear error status */
8882 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8883 PCI_EXP_DEVSTA_CED |
8884 PCI_EXP_DEVSTA_NFED |
8885 PCI_EXP_DEVSTA_FED |
8886 PCI_EXP_DEVSTA_URD);
8887 }
8888
8889 tg3_restore_pci_state(tp);
8890
8891 tg3_flag_clear(tp, CHIP_RESETTING);
8892 tg3_flag_clear(tp, ERROR_PROCESSED);
8893
8894 val = 0;
8895 if (tg3_flag(tp, 5780_CLASS))
8896 val = tr32(MEMARB_MODE);
8897 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8898
8899 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
8900 tg3_stop_fw(tp);
8901 tw32(0x5000, 0x400);
8902 }
8903
8904 if (tg3_flag(tp, IS_SSB_CORE)) {
8905 /*
8906 * BCM4785: In order to avoid repercussions from using
8907 * potentially defective internal ROM, stop the Rx RISC CPU,
8908 * which is not required for normal operation.
8909 */
8910 tg3_stop_fw(tp);
8911 tg3_halt_cpu(tp, RX_CPU_BASE);
8912 }
8913
8914 err = tg3_poll_fw(tp);
8915 if (err)
8916 return err;
8917
8918 tw32(GRC_MODE, tp->grc_mode);
8919
8920 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
8921 val = tr32(0xc4);
8922
8923 tw32(0xc4, val | (1 << 15));
8924 }
8925
8926 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8927 tg3_asic_rev(tp) == ASIC_REV_5705) {
8928 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8929 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
8930 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8931 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8932 }
8933
8934 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8935 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8936 val = tp->mac_mode;
8937 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8938 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8939 val = tp->mac_mode;
8940 } else
8941 val = 0;
8942
8943 tw32_f(MAC_MODE, val);
8944 udelay(40);
8945
8946 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8947
8948 tg3_mdio_start(tp);
8949
8950 if (tg3_flag(tp, PCI_EXPRESS) &&
8951 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
8952 tg3_asic_rev(tp) != ASIC_REV_5785 &&
8953 !tg3_flag(tp, 57765_PLUS)) {
8954 val = tr32(0x7c00);
8955
8956 tw32(0x7c00, val | (1 << 25));
8957 }
8958
8959 if (tg3_asic_rev(tp) == ASIC_REV_5720) {
8960 val = tr32(TG3_CPMU_CLCK_ORIDE);
8961 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8962 }
8963
8964 /* Reprobe ASF enable state. */
8965 tg3_flag_clear(tp, ENABLE_ASF);
8966 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
8967 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
8968
8969 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8970 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8971 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8972 u32 nic_cfg;
8973
8974 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8975 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8976 tg3_flag_set(tp, ENABLE_ASF);
8977 tp->last_event_jiffies = jiffies;
8978 if (tg3_flag(tp, 5750_PLUS))
8979 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8980
8981 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
8982 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
8983 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
8984 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
8985 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
8986 }
8987 }
8988
8989 return 0;
8990 }
8991
8992 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8993 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8994
8995 /* tp->lock is held. */
8996 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
8997 {
8998 int err;
8999
9000 tg3_stop_fw(tp);
9001
9002 tg3_write_sig_pre_reset(tp, kind);
9003
9004 tg3_abort_hw(tp, silent);
9005 err = tg3_chip_reset(tp);
9006
9007 __tg3_set_mac_addr(tp, false);
9008
9009 tg3_write_sig_legacy(tp, kind);
9010 tg3_write_sig_post_reset(tp, kind);
9011
9012 if (tp->hw_stats) {
9013 /* Save the stats across chip resets... */
9014 tg3_get_nstats(tp, &tp->net_stats_prev);
9015 tg3_get_estats(tp, &tp->estats_prev);
9016
9017 /* And make sure the next sample is new data */
9018 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9019 }
9020
9021 if (err)
9022 return err;
9023
9024 return 0;
9025 }
9026
9027 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9028 {
9029 struct tg3 *tp = netdev_priv(dev);
9030 struct sockaddr *addr = p;
9031 int err = 0;
9032 bool skip_mac_1 = false;
9033
9034 if (!is_valid_ether_addr(addr->sa_data))
9035 return -EADDRNOTAVAIL;
9036
9037 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9038
9039 if (!netif_running(dev))
9040 return 0;
9041
9042 if (tg3_flag(tp, ENABLE_ASF)) {
9043 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9044
9045 addr0_high = tr32(MAC_ADDR_0_HIGH);
9046 addr0_low = tr32(MAC_ADDR_0_LOW);
9047 addr1_high = tr32(MAC_ADDR_1_HIGH);
9048 addr1_low = tr32(MAC_ADDR_1_LOW);
9049
9050 /* Skip MAC addr 1 if ASF is using it. */
9051 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9052 !(addr1_high == 0 && addr1_low == 0))
9053 skip_mac_1 = true;
9054 }
9055 spin_lock_bh(&tp->lock);
9056 __tg3_set_mac_addr(tp, skip_mac_1);
9057 spin_unlock_bh(&tp->lock);
9058
9059 return err;
9060 }
9061
9062 /* tp->lock is held. */
9063 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9064 dma_addr_t mapping, u32 maxlen_flags,
9065 u32 nic_addr)
9066 {
9067 tg3_write_mem(tp,
9068 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9069 ((u64) mapping >> 32));
9070 tg3_write_mem(tp,
9071 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9072 ((u64) mapping & 0xffffffff));
9073 tg3_write_mem(tp,
9074 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9075 maxlen_flags);
9076
9077 if (!tg3_flag(tp, 5705_PLUS))
9078 tg3_write_mem(tp,
9079 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9080 nic_addr);
9081 }
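/* Each TG3_BDINFO block in NIC SRAM is a small fixed-layout record:
 * the 64-bit host DMA address of the ring (high word first), a
 * maxlen/flags word, and, on pre-5705 parts, the NIC SRAM address of
 * the descriptors. The ring setup code below writes one such record
 * per send and receive return ring.
 */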
9082
9083
9084 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9085 {
9086 int i = 0;
9087
9088 if (!tg3_flag(tp, ENABLE_TSS)) {
9089 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9090 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9091 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9092 } else {
9093 tw32(HOSTCC_TXCOL_TICKS, 0);
9094 tw32(HOSTCC_TXMAX_FRAMES, 0);
9095 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9096
9097 for (; i < tp->txq_cnt; i++) {
9098 u32 reg;
9099
9100 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9101 tw32(reg, ec->tx_coalesce_usecs);
9102 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9103 tw32(reg, ec->tx_max_coalesced_frames);
9104 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9105 tw32(reg, ec->tx_max_coalesced_frames_irq);
9106 }
9107 }
9108
9109 for (; i < tp->irq_max - 1; i++) {
9110 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9111 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9112 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9113 }
9114 }
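/* The per-vector coalescing registers sit at a fixed 0x18 byte
 * stride, so vector i uses HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18 and so
 * on. Vectors beyond txq_cnt are explicitly zeroed so stale settings
 * cannot survive a reconfiguration.
 */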
9115
9116 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9117 {
9118 int i = 0;
9119 u32 limit = tp->rxq_cnt;
9120
9121 if (!tg3_flag(tp, ENABLE_RSS)) {
9122 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9123 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9124 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9125 limit--;
9126 } else {
9127 tw32(HOSTCC_RXCOL_TICKS, 0);
9128 tw32(HOSTCC_RXMAX_FRAMES, 0);
9129 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9130 }
9131
9132 for (; i < limit; i++) {
9133 u32 reg;
9134
9135 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9136 tw32(reg, ec->rx_coalesce_usecs);
9137 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9138 tw32(reg, ec->rx_max_coalesced_frames);
9139 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9140 tw32(reg, ec->rx_max_coalesced_frames_irq);
9141 }
9142
9143 for (; i < tp->irq_max - 1; i++) {
9144 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9145 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9146 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9147 }
9148 }
9149
9150 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9151 {
9152 tg3_coal_tx_init(tp, ec);
9153 tg3_coal_rx_init(tp, ec);
9154
9155 if (!tg3_flag(tp, 5705_PLUS)) {
9156 u32 val = ec->stats_block_coalesce_usecs;
9157
9158 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9159 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9160
9161 if (!tp->link_up)
9162 val = 0;
9163
9164 tw32(HOSTCC_STAT_COAL_TICKS, val);
9165 }
9166 }
9167
9168 /* tp->lock is held. */
9169 static void tg3_rings_reset(struct tg3 *tp)
9170 {
9171 int i;
9172 u32 stblk, txrcb, rxrcb, limit;
9173 struct tg3_napi *tnapi = &tp->napi[0];
9174
9175 /* Disable all transmit rings but the first. */
9176 if (!tg3_flag(tp, 5705_PLUS))
9177 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9178 else if (tg3_flag(tp, 5717_PLUS))
9179 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9180 else if (tg3_flag(tp, 57765_CLASS) ||
9181 tg3_asic_rev(tp) == ASIC_REV_5762)
9182 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9183 else
9184 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9185
9186 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9187 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9188 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9189 BDINFO_FLAGS_DISABLED);
9190
9191
9192 /* Disable all receive return rings but the first. */
9193 if (tg3_flag(tp, 5717_PLUS))
9194 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9195 else if (!tg3_flag(tp, 5705_PLUS))
9196 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9197 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9198 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9199 tg3_flag(tp, 57765_CLASS))
9200 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9201 else
9202 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9203
9204 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9205 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9206 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9207 BDINFO_FLAGS_DISABLED);
9208
9209 /* Disable interrupts */
9210 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9211 tp->napi[0].chk_msi_cnt = 0;
9212 tp->napi[0].last_rx_cons = 0;
9213 tp->napi[0].last_tx_cons = 0;
9214
9215 /* Zero mailbox registers. */
9216 if (tg3_flag(tp, SUPPORT_MSIX)) {
9217 for (i = 1; i < tp->irq_max; i++) {
9218 tp->napi[i].tx_prod = 0;
9219 tp->napi[i].tx_cons = 0;
9220 if (tg3_flag(tp, ENABLE_TSS))
9221 tw32_mailbox(tp->napi[i].prodmbox, 0);
9222 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9223 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9224 tp->napi[i].chk_msi_cnt = 0;
9225 tp->napi[i].last_rx_cons = 0;
9226 tp->napi[i].last_tx_cons = 0;
9227 }
9228 if (!tg3_flag(tp, ENABLE_TSS))
9229 tw32_mailbox(tp->napi[0].prodmbox, 0);
9230 } else {
9231 tp->napi[0].tx_prod = 0;
9232 tp->napi[0].tx_cons = 0;
9233 tw32_mailbox(tp->napi[0].prodmbox, 0);
9234 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9235 }
9236
9237 /* Make sure the NIC-based send BD rings are disabled. */
9238 if (!tg3_flag(tp, 5705_PLUS)) {
9239 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9240 for (i = 0; i < 16; i++)
9241 tw32_tx_mbox(mbox + i * 8, 0);
9242 }
9243
9244 txrcb = NIC_SRAM_SEND_RCB;
9245 rxrcb = NIC_SRAM_RCV_RET_RCB;
9246
9247 /* Clear status block in ram. */
9248 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9249
9250 /* Set status block DMA address */
9251 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9252 ((u64) tnapi->status_mapping >> 32));
9253 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9254 ((u64) tnapi->status_mapping & 0xffffffff));
9255
9256 if (tnapi->tx_ring) {
9257 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9258 (TG3_TX_RING_SIZE <<
9259 BDINFO_FLAGS_MAXLEN_SHIFT),
9260 NIC_SRAM_TX_BUFFER_DESC);
9261 txrcb += TG3_BDINFO_SIZE;
9262 }
9263
9264 if (tnapi->rx_rcb) {
9265 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9266 (tp->rx_ret_ring_mask + 1) <<
9267 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9268 rxrcb += TG3_BDINFO_SIZE;
9269 }
9270
9271 stblk = HOSTCC_STATBLCK_RING1;
9272
9273 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9274 u64 mapping = (u64)tnapi->status_mapping;
9275 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9276 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9277
9278 /* Clear status block in ram. */
9279 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9280
9281 if (tnapi->tx_ring) {
9282 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9283 (TG3_TX_RING_SIZE <<
9284 BDINFO_FLAGS_MAXLEN_SHIFT),
9285 NIC_SRAM_TX_BUFFER_DESC);
9286 txrcb += TG3_BDINFO_SIZE;
9287 }
9288
9289 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9290 ((tp->rx_ret_ring_mask + 1) <<
9291 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
9292
9293 stblk += 8;
9294 rxrcb += TG3_BDINFO_SIZE;
9295 }
9296 }
9297
9298 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9299 {
9300 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9301
9302 if (!tg3_flag(tp, 5750_PLUS) ||
9303 tg3_flag(tp, 5780_CLASS) ||
9304 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9305 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9306 tg3_flag(tp, 57765_PLUS))
9307 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9308 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9309 tg3_asic_rev(tp) == ASIC_REV_5787)
9310 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9311 else
9312 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9313
9314 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9315 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9316
9317 val = min(nic_rep_thresh, host_rep_thresh);
9318 tw32(RCVBDI_STD_THRESH, val);
9319
9320 if (tg3_flag(tp, 57765_PLUS))
9321 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9322
9323 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9324 return;
9325
9326 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9327
9328 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9329
9330 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9331 tw32(RCVBDI_JUMBO_THRESH, val);
9332
9333 if (tg3_flag(tp, 57765_PLUS))
9334 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9335 }
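/* Example of the arithmetic above, assuming the default of 200
 * posted standard RX buffers: host_rep_thresh = max(200 / 8, 1) = 25,
 * and the programmed threshold is the smaller of that and half the
 * per-chip BD cache size, so replenishment is requested well before
 * the BD cache can drain.
 */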
9336
9337 static inline u32 calc_crc(unsigned char *buf, int len)
9338 {
9339 u32 reg;
9340 u32 tmp;
9341 int j, k;
9342
9343 reg = 0xffffffff;
9344
9345 for (j = 0; j < len; j++) {
9346 reg ^= buf[j];
9347
9348 for (k = 0; k < 8; k++) {
9349 tmp = reg & 0x01;
9350
9351 reg >>= 1;
9352
9353 if (tmp)
9354 reg ^= 0xedb88320;
9355 }
9356 }
9357
9358 return ~reg;
9359 }
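/* This is the standard bit-serial CRC-32 (reflected polynomial
 * 0xedb88320 with final inversion) used by Ethernet. The multicast
 * filter in __tg3_set_rx_mode() below keeps only the low 7 bits of
 * the complemented CRC: bits 5..6 pick one of the four
 * MAC_HASH_REG_* registers and bits 0..4 pick the bit within it,
 * i.e. a 128-bit hash bucket per address.
 */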
9360
9361 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9362 {
9363 /* accept or reject all multicast frames */
9364 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9365 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9366 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9367 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9368 }
9369
9370 static void __tg3_set_rx_mode(struct net_device *dev)
9371 {
9372 struct tg3 *tp = netdev_priv(dev);
9373 u32 rx_mode;
9374
9375 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9376 RX_MODE_KEEP_VLAN_TAG);
9377
9378 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9379 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9380 * flag clear.
9381 */
9382 if (!tg3_flag(tp, ENABLE_ASF))
9383 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9384 #endif
9385
9386 if (dev->flags & IFF_PROMISC) {
9387 /* Promiscuous mode. */
9388 rx_mode |= RX_MODE_PROMISC;
9389 } else if (dev->flags & IFF_ALLMULTI) {
9390 /* Accept all multicast. */
9391 tg3_set_multi(tp, 1);
9392 } else if (netdev_mc_empty(dev)) {
9393 /* Reject all multicast. */
9394 tg3_set_multi(tp, 0);
9395 } else {
9396 /* Accept one or more multicast(s). */
9397 struct netdev_hw_addr *ha;
9398 u32 mc_filter[4] = { 0, };
9399 u32 regidx;
9400 u32 bit;
9401 u32 crc;
9402
9403 netdev_for_each_mc_addr(ha, dev) {
9404 crc = calc_crc(ha->addr, ETH_ALEN);
9405 bit = ~crc & 0x7f;
9406 regidx = (bit & 0x60) >> 5;
9407 bit &= 0x1f;
9408 mc_filter[regidx] |= (1 << bit);
9409 }
9410
9411 tw32(MAC_HASH_REG_0, mc_filter[0]);
9412 tw32(MAC_HASH_REG_1, mc_filter[1]);
9413 tw32(MAC_HASH_REG_2, mc_filter[2]);
9414 tw32(MAC_HASH_REG_3, mc_filter[3]);
9415 }
9416
9417 if (rx_mode != tp->rx_mode) {
9418 tp->rx_mode = rx_mode;
9419 tw32_f(MAC_RX_MODE, rx_mode);
9420 udelay(10);
9421 }
9422 }
9423
9424 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9425 {
9426 int i;
9427
9428 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9429 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9430 }
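/* ethtool_rxfh_indir_default(i, qcnt) simply spreads entries
 * round-robin (i % qcnt), so with e.g. four RX queues the default
 * table reads 0, 1, 2, 3, 0, 1, 2, 3, ...
 */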
9431
9432 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9433 {
9434 int i;
9435
9436 if (!tg3_flag(tp, SUPPORT_MSIX))
9437 return;
9438
9439 if (tp->rxq_cnt == 1) {
9440 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9441 return;
9442 }
9443
9444 /* Validate table against current IRQ count */
9445 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9446 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9447 break;
9448 }
9449
9450 if (i != TG3_RSS_INDIR_TBL_SIZE)
9451 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9452 }
9453
9454 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9455 {
9456 int i = 0;
9457 u32 reg = MAC_RSS_INDIR_TBL_0;
9458
9459 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9460 u32 val = tp->rss_ind_tbl[i];
9461 i++;
9462 for (; i % 8; i++) {
9463 val <<= 4;
9464 val |= tp->rss_ind_tbl[i];
9465 }
9466 tw32(reg, val);
9467 reg += 4;
9468 }
9469 }
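/* The indirection table is packed 4 bits per entry, 8 entries per
 * 32-bit register, most significant nibble first; with
 * TG3_RSS_INDIR_TBL_SIZE entries (128) that is 16 consecutive
 * registers starting at MAC_RSS_INDIR_TBL_0.
 */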
9470
9471 /* tp->lock is held. */
9472 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9473 {
9474 u32 val, rdmac_mode;
9475 int i, err, limit;
9476 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9477
9478 tg3_disable_ints(tp);
9479
9480 tg3_stop_fw(tp);
9481
9482 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9483
9484 if (tg3_flag(tp, INIT_COMPLETE))
9485 tg3_abort_hw(tp, 1);
9486
9487 /* Enable MAC control of LPI */
9488 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
9489 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
9490 TG3_CPMU_EEE_LNKIDL_UART_IDL;
9491 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9492 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
9493
9494 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
9495
9496 tw32_f(TG3_CPMU_EEE_CTRL,
9497 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
9498
9499 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9500 TG3_CPMU_EEEMD_LPI_IN_TX |
9501 TG3_CPMU_EEEMD_LPI_IN_RX |
9502 TG3_CPMU_EEEMD_EEE_ENABLE;
9503
9504 if (tg3_asic_rev(tp) != ASIC_REV_5717)
9505 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9506
9507 if (tg3_flag(tp, ENABLE_APE))
9508 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9509
9510 tw32_f(TG3_CPMU_EEE_MODE, val);
9511
9512 tw32_f(TG3_CPMU_EEE_DBTMR1,
9513 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9514 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9515
9516 tw32_f(TG3_CPMU_EEE_DBTMR2,
9517 TG3_CPMU_DBTMR2_APE_TX_2047US |
9518 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9519 }
9520
9521 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9522 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9523 tg3_phy_pull_config(tp);
9524 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9525 }
9526
9527 if (reset_phy)
9528 tg3_phy_reset(tp);
9529
9530 err = tg3_chip_reset(tp);
9531 if (err)
9532 return err;
9533
9534 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9535
9536 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9537 val = tr32(TG3_CPMU_CTRL);
9538 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9539 tw32(TG3_CPMU_CTRL, val);
9540
9541 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9542 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9543 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9544 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9545
9546 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9547 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9548 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9549 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9550
9551 val = tr32(TG3_CPMU_HST_ACC);
9552 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9553 val |= CPMU_HST_ACC_MACCLK_6_25;
9554 tw32(TG3_CPMU_HST_ACC, val);
9555 }
9556
9557 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9558 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9559 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9560 PCIE_PWR_MGMT_L1_THRESH_4MS;
9561 tw32(PCIE_PWR_MGMT_THRESH, val);
9562
9563 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9564 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9565
9566 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9567
9568 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9569 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9570 }
9571
9572 if (tg3_flag(tp, L1PLLPD_EN)) {
9573 u32 grc_mode = tr32(GRC_MODE);
9574
9575 /* Access the lower 1K of PL PCIE block registers. */
9576 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9577 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9578
9579 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9580 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9581 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9582
9583 tw32(GRC_MODE, grc_mode);
9584 }
9585
9586 if (tg3_flag(tp, 57765_CLASS)) {
9587 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9588 u32 grc_mode = tr32(GRC_MODE);
9589
9590 /* Access the lower 1K of PL PCIE block registers. */
9591 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9592 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9593
9594 val = tr32(TG3_PCIE_TLDLPL_PORT +
9595 TG3_PCIE_PL_LO_PHYCTL5);
9596 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9597 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9598
9599 tw32(GRC_MODE, grc_mode);
9600 }
9601
9602 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9603 u32 grc_mode;
9604
9605 /* Fix transmit hangs */
9606 val = tr32(TG3_CPMU_PADRNG_CTL);
9607 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9608 tw32(TG3_CPMU_PADRNG_CTL, val);
9609
9610 grc_mode = tr32(GRC_MODE);
9611
9612 /* Access the lower 1K of DL PCIE block registers. */
9613 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9614 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9615
9616 val = tr32(TG3_PCIE_TLDLPL_PORT +
9617 TG3_PCIE_DL_LO_FTSMAX);
9618 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9619 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9620 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9621
9622 tw32(GRC_MODE, grc_mode);
9623 }
9624
9625 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9626 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9627 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9628 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9629 }
9630
9631 /* This works around an issue with Athlon chipsets on
9632 * B3 tigon3 silicon. This bit has no effect on any
9633 * other revision. But do not set this on PCI Express
9634 * chips and don't even touch the clocks if the CPMU is present.
9635 */
9636 if (!tg3_flag(tp, CPMU_PRESENT)) {
9637 if (!tg3_flag(tp, PCI_EXPRESS))
9638 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9639 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9640 }
9641
9642 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9643 tg3_flag(tp, PCIX_MODE)) {
9644 val = tr32(TG3PCI_PCISTATE);
9645 val |= PCISTATE_RETRY_SAME_DMA;
9646 tw32(TG3PCI_PCISTATE, val);
9647 }
9648
9649 if (tg3_flag(tp, ENABLE_APE)) {
9650 /* Allow reads and writes to the
9651 * APE register and memory space.
9652 */
9653 val = tr32(TG3PCI_PCISTATE);
9654 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9655 PCISTATE_ALLOW_APE_SHMEM_WR |
9656 PCISTATE_ALLOW_APE_PSPACE_WR;
9657 tw32(TG3PCI_PCISTATE, val);
9658 }
9659
9660 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9661 /* Enable some hw fixes. */
9662 val = tr32(TG3PCI_MSI_DATA);
9663 val |= (1 << 26) | (1 << 28) | (1 << 29);
9664 tw32(TG3PCI_MSI_DATA, val);
9665 }
9666
9667 /* Descriptor ring init may make accesses to the
9668 * NIC SRAM area to setup the TX descriptors, so we
9669 * can only do this after the hardware has been
9670 * successfully reset.
9671 */
9672 err = tg3_init_rings(tp);
9673 if (err)
9674 return err;
9675
9676 if (tg3_flag(tp, 57765_PLUS)) {
9677 val = tr32(TG3PCI_DMA_RW_CTRL) &
9678 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9679 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9680 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9681 if (!tg3_flag(tp, 57765_CLASS) &&
9682 tg3_asic_rev(tp) != ASIC_REV_5717 &&
9683 tg3_asic_rev(tp) != ASIC_REV_5762)
9684 val |= DMA_RWCTRL_TAGGED_STAT_WA;
9685 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9686 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9687 tg3_asic_rev(tp) != ASIC_REV_5761) {
9688 /* This value is determined during the probe time DMA
9689 * engine test, tg3_test_dma.
9690 */
9691 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9692 }
9693
9694 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9695 GRC_MODE_4X_NIC_SEND_RINGS |
9696 GRC_MODE_NO_TX_PHDR_CSUM |
9697 GRC_MODE_NO_RX_PHDR_CSUM);
9698 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9699
9700 /* Pseudo-header checksum is done by hardware logic and not
9701 * the offload processors, so make the chip do the pseudo-
9702 * header checksums on receive. For transmit it is more
9703 * convenient to do the pseudo-header checksum in software
9704 * as Linux does that on transmit for us in all cases.
9705 */
9706 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9707
9708 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9709 if (tp->rxptpctl)
9710 tw32(TG3_RX_PTP_CTL,
9711 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9712
9713 if (tg3_flag(tp, PTP_CAPABLE))
9714 val |= GRC_MODE_TIME_SYNC_ENABLE;
9715
9716 tw32(GRC_MODE, tp->grc_mode | val);
9717
9718 /* Set up the timer prescaler register. The clock is always 66 MHz; a prescale value of 65 divides it by 65 + 1, giving a 1 MHz timer tick. */
9719 val = tr32(GRC_MISC_CFG);
9720 val &= ~0xff;
9721 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9722 tw32(GRC_MISC_CFG, val);
9723
9724 /* Initialize MBUF/DESC pool. */
9725 if (tg3_flag(tp, 5750_PLUS)) {
9726 /* Do nothing. */
9727 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9728 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9729 if (tg3_asic_rev(tp) == ASIC_REV_5704)
9730 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9731 else
9732 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9733 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9734 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9735 } else if (tg3_flag(tp, TSO_CAPABLE)) {
9736 int fw_len;
9737
9738 fw_len = tp->fw_len;
9739 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9740 tw32(BUFMGR_MB_POOL_ADDR,
9741 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9742 tw32(BUFMGR_MB_POOL_SIZE,
9743 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9744 }
9745
9746 if (tp->dev->mtu <= ETH_DATA_LEN) {
9747 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9748 tp->bufmgr_config.mbuf_read_dma_low_water);
9749 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9750 tp->bufmgr_config.mbuf_mac_rx_low_water);
9751 tw32(BUFMGR_MB_HIGH_WATER,
9752 tp->bufmgr_config.mbuf_high_water);
9753 } else {
9754 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9755 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9756 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9757 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9758 tw32(BUFMGR_MB_HIGH_WATER,
9759 tp->bufmgr_config.mbuf_high_water_jumbo);
9760 }
9761 tw32(BUFMGR_DMA_LOW_WATER,
9762 tp->bufmgr_config.dma_low_water);
9763 tw32(BUFMGR_DMA_HIGH_WATER,
9764 tp->bufmgr_config.dma_high_water);
9765
9766 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9767 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9768 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9769 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9770 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9771 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9772 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9773 tw32(BUFMGR_MODE, val);
9774 for (i = 0; i < 2000; i++) {
9775 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9776 break;
9777 udelay(10);
9778 }
9779 if (i >= 2000) {
9780 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9781 return -ENODEV;
9782 }
9783
9784 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9785 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9786
9787 tg3_setup_rxbd_thresholds(tp);
9788
9789 /* Initialize TG3_BDINFO's at:
9790 * RCVDBDI_STD_BD: standard eth size rx ring
9791 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
9792 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
9793 *
9794 * like so:
9795 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
9796 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
9797 * ring attribute flags
9798 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
9799 *
9800 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9801 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9802 *
9803 * The size of each ring is fixed in the firmware, but the location is
9804 * configurable.
9805 */
9806 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9807 ((u64) tpr->rx_std_mapping >> 32));
9808 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9809 ((u64) tpr->rx_std_mapping & 0xffffffff));
9810 if (!tg3_flag(tp, 5717_PLUS))
9811 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9812 NIC_SRAM_RX_BUFFER_DESC);
9813
9814 /* Disable the mini ring */
9815 if (!tg3_flag(tp, 5705_PLUS))
9816 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9817 BDINFO_FLAGS_DISABLED);
9818
9819 /* Program the jumbo buffer descriptor ring control
9820 * blocks on those devices that have them.
9821 */
9822 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9823 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9824
9825 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9826 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9827 ((u64) tpr->rx_jmb_mapping >> 32));
9828 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9829 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9830 val = TG3_RX_JMB_RING_SIZE(tp) <<
9831 BDINFO_FLAGS_MAXLEN_SHIFT;
9832 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9833 val | BDINFO_FLAGS_USE_EXT_RECV);
9834 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9835 tg3_flag(tp, 57765_CLASS) ||
9836 tg3_asic_rev(tp) == ASIC_REV_5762)
9837 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9838 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9839 } else {
9840 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9841 BDINFO_FLAGS_DISABLED);
9842 }
9843
9844 if (tg3_flag(tp, 57765_PLUS)) {
9845 val = TG3_RX_STD_RING_SIZE(tp);
9846 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9847 val |= (TG3_RX_STD_DMA_SZ << 2);
9848 } else
9849 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9850 } else
9851 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9852
9853 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9854
9855 tpr->rx_std_prod_idx = tp->rx_pending;
9856 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9857
9858 tpr->rx_jmb_prod_idx =
9859 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9860 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9861
9862 tg3_rings_reset(tp);
9863
9864 /* Initialize MAC address and backoff seed. */
9865 __tg3_set_mac_addr(tp, false);
9866
9867 /* MTU + ethernet header + FCS + optional VLAN tag */
9868 tw32(MAC_RX_MTU_SIZE,
9869 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9870
9871 /* The slot time is changed by tg3_setup_phy if we
9872 * run at gigabit with half duplex.
9873 */
9874 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9875 (6 << TX_LENGTHS_IPG_SHIFT) |
9876 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9877
9878 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9879 tg3_asic_rev(tp) == ASIC_REV_5762)
9880 val |= tr32(MAC_TX_LENGTHS) &
9881 (TX_LENGTHS_JMB_FRM_LEN_MSK |
9882 TX_LENGTHS_CNT_DWN_VAL_MSK);
9883
9884 tw32(MAC_TX_LENGTHS, val);
9885
9886 /* Receive rules. */
9887 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9888 tw32(RCVLPC_CONFIG, 0x0181);
9889
9890 /* Calculate RDMAC_MODE setting early, we need it to determine
9891 * the RCVLPC_STATE_ENABLE mask.
9892 */
9893 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9894 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9895 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9896 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9897 RDMAC_MODE_LNGREAD_ENAB);
9898
9899 if (tg3_asic_rev(tp) == ASIC_REV_5717)
9900 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9901
9902 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
9903 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9904 tg3_asic_rev(tp) == ASIC_REV_57780)
9905 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9906 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9907 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9908
9909 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9910 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9911 if (tg3_flag(tp, TSO_CAPABLE) &&
9912 tg3_asic_rev(tp) == ASIC_REV_5705) {
9913 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9914 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9915 !tg3_flag(tp, IS_5788)) {
9916 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9917 }
9918 }
9919
9920 if (tg3_flag(tp, PCI_EXPRESS))
9921 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9922
9923 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
9924 tp->dma_limit = 0;
9925 if (tp->dev->mtu <= ETH_DATA_LEN) {
9926 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
9927 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
9928 }
9929 }
9930
9931 if (tg3_flag(tp, HW_TSO_1) ||
9932 tg3_flag(tp, HW_TSO_2) ||
9933 tg3_flag(tp, HW_TSO_3))
9934 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9935
9936 if (tg3_flag(tp, 57765_PLUS) ||
9937 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9938 tg3_asic_rev(tp) == ASIC_REV_57780)
9939 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9940
9941 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9942 tg3_asic_rev(tp) == ASIC_REV_5762)
9943 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9944
9945 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
9946 tg3_asic_rev(tp) == ASIC_REV_5784 ||
9947 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9948 tg3_asic_rev(tp) == ASIC_REV_57780 ||
9949 tg3_flag(tp, 57765_PLUS)) {
9950 u32 tgtreg;
9951
9952 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9953 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
9954 else
9955 tgtreg = TG3_RDMA_RSRVCTRL_REG;
9956
9957 val = tr32(tgtreg);
9958 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9959 tg3_asic_rev(tp) == ASIC_REV_5762) {
9960 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9961 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9962 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9963 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9964 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9965 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9966 }
9967 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9968 }
9969
9970 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
9971 tg3_asic_rev(tp) == ASIC_REV_5720 ||
9972 tg3_asic_rev(tp) == ASIC_REV_5762) {
9973 u32 tgtreg;
9974
9975 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9976 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
9977 else
9978 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
9979
9980 val = tr32(tgtreg);
9981 tw32(tgtreg, val |
9982 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9983 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9984 }
9985
9986 /* Receive/send statistics. */
9987 if (tg3_flag(tp, 5750_PLUS)) {
9988 val = tr32(RCVLPC_STATS_ENABLE);
9989 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9990 tw32(RCVLPC_STATS_ENABLE, val);
9991 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9992 tg3_flag(tp, TSO_CAPABLE)) {
9993 val = tr32(RCVLPC_STATS_ENABLE);
9994 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9995 tw32(RCVLPC_STATS_ENABLE, val);
9996 } else {
9997 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9998 }
9999 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10000 tw32(SNDDATAI_STATSENAB, 0xffffff);
10001 tw32(SNDDATAI_STATSCTRL,
10002 (SNDDATAI_SCTRL_ENABLE |
10003 SNDDATAI_SCTRL_FASTUPD));
10004
10005 /* Setup host coalescing engine. */
10006 tw32(HOSTCC_MODE, 0);
10007 for (i = 0; i < 2000; i++) {
10008 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10009 break;
10010 udelay(10);
10011 }
10012
10013 __tg3_set_coalesce(tp, &tp->coal);
10014
10015 if (!tg3_flag(tp, 5705_PLUS)) {
10016 /* Status/statistics block address. See tg3_timer,
10017 * the tg3_periodic_fetch_stats call there, and
10018 * tg3_get_stats to see how this works for 5705/5750 chips.
10019 */
10020 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10021 ((u64) tp->stats_mapping >> 32));
10022 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10023 ((u64) tp->stats_mapping & 0xffffffff));
10024 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10025
10026 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10027
10028 /* Clear statistics and status block memory areas */
10029 for (i = NIC_SRAM_STATS_BLK;
10030 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10031 i += sizeof(u32)) {
10032 tg3_write_mem(tp, i, 0);
10033 udelay(40);
10034 }
10035 }
10036
10037 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10038
10039 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10040 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10041 if (!tg3_flag(tp, 5705_PLUS))
10042 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10043
10044 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10045 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10046 /* reset to prevent losing 1st rx packet intermittently */
10047 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10048 udelay(10);
10049 }
10050
10051 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10052 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10053 MAC_MODE_FHDE_ENABLE;
10054 if (tg3_flag(tp, ENABLE_APE))
10055 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10056 if (!tg3_flag(tp, 5705_PLUS) &&
10057 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10058 tg3_asic_rev(tp) != ASIC_REV_5700)
10059 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10060 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10061 udelay(40);
10062
10063 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10064 * If TG3_FLAG_IS_NIC is zero, we should read the
10065 * register to preserve the GPIO settings for LOMs. The GPIOs,
10066 * whether used as inputs or outputs, are set by boot code after
10067 * reset.
10068 */
10069 if (!tg3_flag(tp, IS_NIC)) {
10070 u32 gpio_mask;
10071
10072 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10073 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10074 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10075
10076 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10077 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10078 GRC_LCLCTRL_GPIO_OUTPUT3;
10079
10080 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10081 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10082
10083 tp->grc_local_ctrl &= ~gpio_mask;
10084 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10085
10086 /* GPIO1 must be driven high for eeprom write protect */
10087 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10088 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10089 GRC_LCLCTRL_GPIO_OUTPUT1);
10090 }
10091 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10092 udelay(100);
10093
10094 if (tg3_flag(tp, USING_MSIX)) {
10095 val = tr32(MSGINT_MODE);
10096 val |= MSGINT_MODE_ENABLE;
10097 if (tp->irq_cnt > 1)
10098 val |= MSGINT_MODE_MULTIVEC_EN;
10099 if (!tg3_flag(tp, 1SHOT_MSI))
10100 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10101 tw32(MSGINT_MODE, val);
10102 }
10103
10104 if (!tg3_flag(tp, 5705_PLUS)) {
10105 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10106 udelay(40);
10107 }
10108
10109 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10110 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10111 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10112 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10113 WDMAC_MODE_LNGREAD_ENAB);
10114
10115 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10116 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10117 if (tg3_flag(tp, TSO_CAPABLE) &&
10118 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10119 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10120 /* nothing */
10121 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10122 !tg3_flag(tp, IS_5788)) {
10123 val |= WDMAC_MODE_RX_ACCEL;
10124 }
10125 }
10126
10127 /* Enable host coalescing bug fix */
10128 if (tg3_flag(tp, 5755_PLUS))
10129 val |= WDMAC_MODE_STATUS_TAG_FIX;
10130
10131 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10132 val |= WDMAC_MODE_BURST_ALL_DATA;
10133
10134 tw32_f(WDMAC_MODE, val);
10135 udelay(40);
10136
10137 if (tg3_flag(tp, PCIX_MODE)) {
10138 u16 pcix_cmd;
10139
10140 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10141 &pcix_cmd);
10142 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10143 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10144 pcix_cmd |= PCI_X_CMD_READ_2K;
10145 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10146 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10147 pcix_cmd |= PCI_X_CMD_READ_2K;
10148 }
10149 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10150 pcix_cmd);
10151 }
10152
10153 tw32_f(RDMAC_MODE, rdmac_mode);
10154 udelay(40);
10155
10156 if (tg3_asic_rev(tp) == ASIC_REV_5719) {
10157 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10158 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10159 break;
10160 }
10161 if (i < TG3_NUM_RDMA_CHANNELS) {
10162 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10163 val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
10164 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10165 tg3_flag_set(tp, 5719_RDMA_BUG);
10166 }
10167 }
10168
10169 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10170 if (!tg3_flag(tp, 5705_PLUS))
10171 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10172
10173 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10174 tw32(SNDDATAC_MODE,
10175 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10176 else
10177 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10178
10179 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10180 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10181 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10182 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10183 val |= RCVDBDI_MODE_LRG_RING_SZ;
10184 tw32(RCVDBDI_MODE, val);
10185 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10186 if (tg3_flag(tp, HW_TSO_1) ||
10187 tg3_flag(tp, HW_TSO_2) ||
10188 tg3_flag(tp, HW_TSO_3))
10189 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10190 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10191 if (tg3_flag(tp, ENABLE_TSS))
10192 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10193 tw32(SNDBDI_MODE, val);
10194 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10195
10196 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10197 err = tg3_load_5701_a0_firmware_fix(tp);
10198 if (err)
10199 return err;
10200 }
10201
10202 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10203 /* Ignore any errors for the firmware download. If download
10204 * fails, the device will operate with EEE disabled.
10205 */
10206 tg3_load_57766_firmware(tp);
10207 }
10208
10209 if (tg3_flag(tp, TSO_CAPABLE)) {
10210 err = tg3_load_tso_firmware(tp);
10211 if (err)
10212 return err;
10213 }
10214
10215 tp->tx_mode = TX_MODE_ENABLE;
10216
10217 if (tg3_flag(tp, 5755_PLUS) ||
10218 tg3_asic_rev(tp) == ASIC_REV_5906)
10219 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10220
10221 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10222 tg3_asic_rev(tp) == ASIC_REV_5762) {
10223 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10224 tp->tx_mode &= ~val;
10225 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10226 }
10227
10228 tw32_f(MAC_TX_MODE, tp->tx_mode);
10229 udelay(100);
10230
10231 if (tg3_flag(tp, ENABLE_RSS)) {
10232 tg3_rss_write_indir_tbl(tp);
10233
10234 /* Setup the "secret" hash key. */
10235 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
10236 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
10237 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
10238 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
10239 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
10240 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
10241 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
10242 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
10243 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
10244 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
10245 }
10246
10247 tp->rx_mode = RX_MODE_ENABLE;
10248 if (tg3_flag(tp, 5755_PLUS))
10249 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10250
10251 if (tg3_flag(tp, ENABLE_RSS))
10252 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10253 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10254 RX_MODE_RSS_IPV6_HASH_EN |
10255 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10256 RX_MODE_RSS_IPV4_HASH_EN |
10257 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10258
10259 tw32_f(MAC_RX_MODE, tp->rx_mode);
10260 udelay(10);
10261
10262 tw32(MAC_LED_CTRL, tp->led_ctrl);
10263
10264 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10265 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10266 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10267 udelay(10);
10268 }
10269 tw32_f(MAC_RX_MODE, tp->rx_mode);
10270 udelay(10);
10271
10272 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10273 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10274 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10275 /* Set drive transmission level to 1.2V */
10276 /* only if the signal pre-emphasis bit is not set */
10277 val = tr32(MAC_SERDES_CFG);
10278 val &= 0xfffff000;
10279 val |= 0x880;
10280 tw32(MAC_SERDES_CFG, val);
10281 }
10282 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10283 tw32(MAC_SERDES_CFG, 0x616000);
10284 }
10285
10286 /* Prevent chip from dropping frames when flow control
10287 * is enabled.
10288 */
10289 if (tg3_flag(tp, 57765_CLASS))
10290 val = 1;
10291 else
10292 val = 2;
10293 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10294
10295 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10296 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10297 /* Use hardware link auto-negotiation */
10298 tg3_flag_set(tp, HW_AUTONEG);
10299 }
10300
10301 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10302 tg3_asic_rev(tp) == ASIC_REV_5714) {
10303 u32 tmp;
10304
10305 tmp = tr32(SERDES_RX_CTRL);
10306 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10307 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10308 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10309 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10310 }
10311
10312 if (!tg3_flag(tp, USE_PHYLIB)) {
10313 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10314 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10315
10316 err = tg3_setup_phy(tp, false);
10317 if (err)
10318 return err;
10319
10320 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10321 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10322 u32 tmp;
10323
10324 /* Clear CRC stats. */
10325 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10326 tg3_writephy(tp, MII_TG3_TEST1,
10327 tmp | MII_TG3_TEST1_CRC_EN);
10328 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10329 }
10330 }
10331 }
10332
10333 __tg3_set_rx_mode(tp->dev);
10334
10335 /* Initialize receive rules. */
10336 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10337 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10338 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10339 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10340
10341 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10342 limit = 8;
10343 else
10344 limit = 16;
10345 if (tg3_flag(tp, ENABLE_ASF))
10346 limit -= 4;
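/* Each case below intentionally falls through to the one beneath it,
 * so that every rule/value register from <limit> up through 15 is
 * cleared. Rules 0 and 1 were programmed above; the writes for rules
 * 2 and 3 are deliberately left commented out.
 */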
10347 switch (limit) {
10348 case 16:
10349 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10350 case 15:
10351 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10352 case 14:
10353 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10354 case 13:
10355 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10356 case 12:
10357 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10358 case 11:
10359 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10360 case 10:
10361 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10362 case 9:
10363 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10364 case 8:
10365 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10366 case 7:
10367 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10368 case 6:
10369 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10370 case 5:
10371 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10372 case 4:
10373 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10374 case 3:
10375 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10376 case 2:
10377 case 1:
10378
10379 default:
10380 break;
10381 }
10382
10383 if (tg3_flag(tp, ENABLE_APE))
10384 /* Write our heartbeat update interval to APE. */
10385 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10386 APE_HOST_HEARTBEAT_INT_DISABLE);
10387
10388 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10389
10390 return 0;
10391 }
10392
10393 /* Called at device open time to get the chip ready for
10394 * packet processing. Invoked with tp->lock held.
10395 */
10396 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10397 {
10398 tg3_switch_clocks(tp);
10399
10400 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10401
10402 return tg3_reset_hw(tp, reset_phy);
10403 }
10404
10405 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10406 {
10407 int i;
10408
10409 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10410 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10411
10412 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10413 off += len;
10414
10415 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10416 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10417 memset(ocir, 0, TG3_OCIR_LEN);
10418 }
10419 }
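/* Records that fail the signature or active-flag check are zeroed, so
 * tg3_hwmon_open() below sees src_data_length == 0 and skips them.
 */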
10420
10421 /* sysfs attributes for hwmon */
10422 static ssize_t tg3_show_temp(struct device *dev,
10423 struct device_attribute *devattr, char *buf)
10424 {
10425 struct pci_dev *pdev = to_pci_dev(dev);
10426 struct net_device *netdev = pci_get_drvdata(pdev);
10427 struct tg3 *tp = netdev_priv(netdev);
10428 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10429 u32 temperature;
10430
10431 spin_lock_bh(&tp->lock);
10432 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10433 sizeof(temperature));
10434 spin_unlock_bh(&tp->lock);
10435 return sprintf(buf, "%u\n", temperature);
10436 }
10437
10438
10439 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10440 TG3_TEMP_SENSOR_OFFSET);
10441 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10442 TG3_TEMP_CAUTION_OFFSET);
10443 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10444 TG3_TEMP_MAX_OFFSET);
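/* Once registered by tg3_hwmon_open() below, these appear as the
 * standard hwmon attributes temp1_input, temp1_crit and temp1_max in
 * sysfs, readable by the usual userspace hwmon tools.
 */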
10445
10446 static struct attribute *tg3_attributes[] = {
10447 &sensor_dev_attr_temp1_input.dev_attr.attr,
10448 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10449 &sensor_dev_attr_temp1_max.dev_attr.attr,
10450 NULL
10451 };
10452
10453 static const struct attribute_group tg3_group = {
10454 .attrs = tg3_attributes,
10455 };
10456
10457 static void tg3_hwmon_close(struct tg3 *tp)
10458 {
10459 if (tp->hwmon_dev) {
10460 hwmon_device_unregister(tp->hwmon_dev);
10461 tp->hwmon_dev = NULL;
10462 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10463 }
10464 }
10465
10466 static void tg3_hwmon_open(struct tg3 *tp)
10467 {
10468 int i, err;
10469 u32 size = 0;
10470 struct pci_dev *pdev = tp->pdev;
10471 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10472
10473 tg3_sd_scan_scratchpad(tp, ocirs);
10474
10475 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10476 if (!ocirs[i].src_data_length)
10477 continue;
10478
10479 size += ocirs[i].src_hdr_length;
10480 size += ocirs[i].src_data_length;
10481 }
10482
10483 if (!size)
10484 return;
10485
10486 /* Register hwmon sysfs hooks */
10487 err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10488 if (err) {
10489 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10490 return;
10491 }
10492
10493 tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10494 if (IS_ERR(tp->hwmon_dev)) {
10495 tp->hwmon_dev = NULL;
10496 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10497 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10498 }
10499 }
10500
10501
10502 #define TG3_STAT_ADD32(PSTAT, REG) \
10503 do { u32 __val = tr32(REG); \
10504 (PSTAT)->low += __val; \
10505 if ((PSTAT)->low < __val) \
10506 (PSTAT)->high += 1; \
10507 } while (0)
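/* Function-form equivalent of TG3_STAT_ADD32 (an illustrative sketch,
 * not used by the driver): fold a 32-bit hardware counter into a
 * 64-bit software accumulator, where an unsigned wrap of the low word
 * after the add signals a carry into the high word.
 */
static inline void tg3_stat_add32_sketch(tg3_stat64_t *pstat, u32 val)
{
	pstat->low += val;
	if (pstat->low < val)	/* low word wrapped around: carry */
		pstat->high += 1;
}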
10508
10509 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10510 {
10511 struct tg3_hw_stats *sp = tp->hw_stats;
10512
10513 if (!tp->link_up)
10514 return;
10515
10516 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10517 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10518 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10519 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10520 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10521 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10522 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10523 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10524 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10525 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10526 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10527 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10528 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10529 if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
10530 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10531 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10532 u32 val;
10533
10534 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10535 val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
10536 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10537 tg3_flag_clear(tp, 5719_RDMA_BUG);
10538 }
10539
10540 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10541 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10542 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10543 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10544 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10545 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10546 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10547 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10548 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10549 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10550 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10551 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10552 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10553 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10554
10555 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10556 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10557 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10558 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10559 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10560 } else {
10561 u32 val = tr32(HOSTCC_FLOW_ATTN);
10562 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10563 if (val) {
10564 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10565 sp->rx_discards.low += val;
10566 if (sp->rx_discards.low < val)
10567 sp->rx_discards.high += 1;
10568 }
10569 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10570 }
10571 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10572 }
10573
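/* Detect a "missed" MSI: if a napi context still has work pending but
 * neither its rx nor tx consumer index has moved since the previous
 * timer tick, allow one grace tick and then call the MSI handler
 * directly to restart processing.
 */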
10574 static void tg3_chk_missed_msi(struct tg3 *tp)
10575 {
10576 u32 i;
10577
10578 for (i = 0; i < tp->irq_cnt; i++) {
10579 struct tg3_napi *tnapi = &tp->napi[i];
10580
10581 if (tg3_has_work(tnapi)) {
10582 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10583 tnapi->last_tx_cons == tnapi->tx_cons) {
10584 if (tnapi->chk_msi_cnt < 1) {
10585 tnapi->chk_msi_cnt++;
10586 return;
10587 }
10588 tg3_msi(0, tnapi);
10589 }
10590 }
10591 tnapi->chk_msi_cnt = 0;
10592 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10593 tnapi->last_tx_cons = tnapi->tx_cons;
10594 }
10595 }
10596
10597 static void tg3_timer(unsigned long __opaque)
10598 {
10599 struct tg3 *tp = (struct tg3 *) __opaque;
10600
10601 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10602 goto restart_timer;
10603
10604 spin_lock(&tp->lock);
10605
10606 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10607 tg3_flag(tp, 57765_CLASS))
10608 tg3_chk_missed_msi(tp);
10609
10610 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10611 /* BCM4785: Flush posted writes from GbE to host memory. */
10612 tr32(HOSTCC_MODE);
10613 }
10614
10615 if (!tg3_flag(tp, TAGGED_STATUS)) {
10616 /* All of this garbage is because, when using non-tagged
10617 * IRQ status, the mailbox/status_block protocol the chip
10618 * uses with the CPU is race prone.
10619 */
10620 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10621 tw32(GRC_LOCAL_CTRL,
10622 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10623 } else {
10624 tw32(HOSTCC_MODE, tp->coalesce_mode |
10625 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10626 }
10627
10628 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10629 spin_unlock(&tp->lock);
10630 tg3_reset_task_schedule(tp);
10631 goto restart_timer;
10632 }
10633 }
10634
10635 /* This part only runs once per second. */
10636 if (!--tp->timer_counter) {
10637 if (tg3_flag(tp, 5705_PLUS))
10638 tg3_periodic_fetch_stats(tp);
10639
10640 if (tp->setlpicnt && !--tp->setlpicnt)
10641 tg3_phy_eee_enable(tp);
10642
10643 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10644 u32 mac_stat;
10645 int phy_event;
10646
10647 mac_stat = tr32(MAC_STATUS);
10648
10649 phy_event = 0;
10650 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10651 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10652 phy_event = 1;
10653 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10654 phy_event = 1;
10655
10656 if (phy_event)
10657 tg3_setup_phy(tp, false);
10658 } else if (tg3_flag(tp, POLL_SERDES)) {
10659 u32 mac_stat = tr32(MAC_STATUS);
10660 int need_setup = 0;
10661
10662 if (tp->link_up &&
10663 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10664 need_setup = 1;
10665 }
10666 if (!tp->link_up &&
10667 (mac_stat & (MAC_STATUS_PCS_SYNCED |
10668 MAC_STATUS_SIGNAL_DET))) {
10669 need_setup = 1;
10670 }
10671 if (need_setup) {
10672 if (!tp->serdes_counter) {
10673 tw32_f(MAC_MODE,
10674 (tp->mac_mode &
10675 ~MAC_MODE_PORT_MODE_MASK));
10676 udelay(40);
10677 tw32_f(MAC_MODE, tp->mac_mode);
10678 udelay(40);
10679 }
10680 tg3_setup_phy(tp, false);
10681 }
10682 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10683 tg3_flag(tp, 5780_CLASS)) {
10684 tg3_serdes_parallel_detect(tp);
10685 }
10686
10687 tp->timer_counter = tp->timer_multiplier;
10688 }
10689
10690 /* Heartbeat is only sent once every 2 seconds.
10691 *
10692 * The heartbeat is to tell the ASF firmware that the host
10693 * driver is still alive. In the event that the OS crashes,
10694 * ASF needs to reset the hardware to free up the FIFO space
10695 * that may be filled with rx packets destined for the host.
10696 * If the FIFO is full, ASF will no longer function properly.
10697 *
10698 * Unintended resets have been reported on real time kernels
10699 * where the timer doesn't run on time. Netpoll will also have
10700 * the same problem.
10701 *
10702 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10703 * to check the ring condition when the heartbeat is expiring
10704 * before doing the reset. This will prevent most unintended
10705 * resets.
10706 */
10707 if (!--tp->asf_counter) {
10708 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10709 tg3_wait_for_event_ack(tp);
10710
10711 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10712 FWCMD_NICDRV_ALIVE3);
10713 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10714 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10715 TG3_FW_UPDATE_TIMEOUT_SEC);
10716
10717 tg3_generate_fw_event(tp);
10718 }
10719 tp->asf_counter = tp->asf_multiplier;
10720 }
10721
10722 spin_unlock(&tp->lock);
10723
10724 restart_timer:
10725 tp->timer.expires = jiffies + tp->timer_offset;
10726 add_timer(&tp->timer);
10727 }
10728
10729 static void tg3_timer_init(struct tg3 *tp)
10730 {
10731 if (tg3_flag(tp, TAGGED_STATUS) &&
10732 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10733 !tg3_flag(tp, 57765_CLASS))
10734 tp->timer_offset = HZ;
10735 else
10736 tp->timer_offset = HZ / 10;
10737
10738 BUG_ON(tp->timer_offset > HZ);
10739
10740 tp->timer_multiplier = (HZ / tp->timer_offset);
10741 tp->asf_multiplier = (HZ / tp->timer_offset) *
10742 TG3_FW_UPDATE_FREQ_SEC;
10743
10744 init_timer(&tp->timer);
10745 tp->timer.data = (unsigned long) tp;
10746 tp->timer.function = tg3_timer;
10747 }
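/* Worked example of the arithmetic above: tagged-status chips (other
 * than 5717 and the 57765 class) tick every HZ jiffies, i.e. once a
 * second, so timer_multiplier is 1; all others tick every HZ/10
 * jiffies, ten times a second, so timer_multiplier is 10. Either way
 * the "once per second" block in tg3_timer() runs at one-second
 * intervals, and asf_multiplier stretches the same tick rate out to
 * TG3_FW_UPDATE_FREQ_SEC seconds.
 */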
10748
10749 static void tg3_timer_start(struct tg3 *tp)
10750 {
10751 tp->asf_counter = tp->asf_multiplier;
10752 tp->timer_counter = tp->timer_multiplier;
10753
10754 tp->timer.expires = jiffies + tp->timer_offset;
10755 add_timer(&tp->timer);
10756 }
10757
10758 static void tg3_timer_stop(struct tg3 *tp)
10759 {
10760 del_timer_sync(&tp->timer);
10761 }
10762
10763 /* Restart hardware after configuration changes, self-test, etc.
10764 * Invoked with tp->lock held.
10765 */
10766 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
10767 __releases(tp->lock)
10768 __acquires(tp->lock)
10769 {
10770 int err;
10771
10772 err = tg3_init_hw(tp, reset_phy);
10773 if (err) {
10774 netdev_err(tp->dev,
10775 "Failed to re-initialize device, aborting\n");
10776 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10777 tg3_full_unlock(tp);
10778 tg3_timer_stop(tp);
10779 tp->irq_sync = 0;
10780 tg3_napi_enable(tp);
10781 dev_close(tp->dev);
10782 tg3_full_lock(tp, 0);
10783 }
10784 return err;
10785 }
10786
10787 static void tg3_reset_task(struct work_struct *work)
10788 {
10789 struct tg3 *tp = container_of(work, struct tg3, reset_task);
10790 int err;
10791
10792 tg3_full_lock(tp, 0);
10793
10794 if (!netif_running(tp->dev)) {
10795 tg3_flag_clear(tp, RESET_TASK_PENDING);
10796 tg3_full_unlock(tp);
10797 return;
10798 }
10799
10800 tg3_full_unlock(tp);
10801
10802 tg3_phy_stop(tp);
10803
10804 tg3_netif_stop(tp);
10805
10806 tg3_full_lock(tp, 1);
10807
10808 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10809 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10810 tp->write32_rx_mbox = tg3_write_flush_reg32;
10811 tg3_flag_set(tp, MBOX_WRITE_REORDER);
10812 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10813 }
10814
10815 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10816 err = tg3_init_hw(tp, true);
10817 if (err)
10818 goto out;
10819
10820 tg3_netif_start(tp);
10821
10822 out:
10823 tg3_full_unlock(tp);
10824
10825 if (!err)
10826 tg3_phy_start(tp);
10827
10828 tg3_flag_clear(tp, RESET_TASK_PENDING);
10829 }
10830
10831 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10832 {
10833 irq_handler_t fn;
10834 unsigned long flags;
10835 char *name;
10836 struct tg3_napi *tnapi = &tp->napi[irq_num];
10837
10838 if (tp->irq_cnt == 1)
10839 name = tp->dev->name;
10840 else {
10841 name = &tnapi->irq_lbl[0];
10842 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10843 name[IFNAMSIZ-1] = 0;
10844 }
10845
10846 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10847 fn = tg3_msi;
10848 if (tg3_flag(tp, 1SHOT_MSI))
10849 fn = tg3_msi_1shot;
10850 flags = 0;
10851 } else {
10852 fn = tg3_interrupt;
10853 if (tg3_flag(tp, TAGGED_STATUS))
10854 fn = tg3_interrupt_tagged;
10855 flags = IRQF_SHARED;
10856 }
10857
10858 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10859 }
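/* Handler selection above: MSI/MSI-X vectors are exclusive, so they
 * use tg3_msi (or tg3_msi_1shot in one-shot mode) with flags == 0,
 * while the legacy INTx line may be shared and so uses tg3_interrupt
 * (or tg3_interrupt_tagged on tagged-status chips) with IRQF_SHARED.
 */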
10860
10861 static int tg3_test_interrupt(struct tg3 *tp)
10862 {
10863 struct tg3_napi *tnapi = &tp->napi[0];
10864 struct net_device *dev = tp->dev;
10865 int err, i, intr_ok = 0;
10866 u32 val;
10867
10868 if (!netif_running(dev))
10869 return -ENODEV;
10870
10871 tg3_disable_ints(tp);
10872
10873 free_irq(tnapi->irq_vec, tnapi);
10874
10875 /*
10876 * Turn off MSI one shot mode. Otherwise this test has no
10877 * observable way to know whether the interrupt was delivered.
10878 */
10879 if (tg3_flag(tp, 57765_PLUS)) {
10880 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10881 tw32(MSGINT_MODE, val);
10882 }
10883
10884 err = request_irq(tnapi->irq_vec, tg3_test_isr,
10885 IRQF_SHARED, dev->name, tnapi);
10886 if (err)
10887 return err;
10888
10889 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10890 tg3_enable_ints(tp);
10891
10892 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10893 tnapi->coal_now);
10894
10895 for (i = 0; i < 5; i++) {
10896 u32 int_mbox, misc_host_ctrl;
10897
10898 int_mbox = tr32_mailbox(tnapi->int_mbox);
10899 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10900
10901 if ((int_mbox != 0) ||
10902 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10903 intr_ok = 1;
10904 break;
10905 }
10906
10907 if (tg3_flag(tp, 57765_PLUS) &&
10908 tnapi->hw_status->status_tag != tnapi->last_tag)
10909 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10910
10911 msleep(10);
10912 }
10913
10914 tg3_disable_ints(tp);
10915
10916 free_irq(tnapi->irq_vec, tnapi);
10917
10918 err = tg3_request_irq(tp, 0);
10919
10920 if (err)
10921 return err;
10922
10923 if (intr_ok) {
10924 /* Reenable MSI one shot mode. */
10925 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10926 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10927 tw32(MSGINT_MODE, val);
10928 }
10929 return 0;
10930 }
10931
10932 return -EIO;
10933 }
10934
10935 /* Returns 0 if the MSI test succeeds, or if the MSI test fails
10936 * but INTx mode is successfully restored.
10937 */
10938 static int tg3_test_msi(struct tg3 *tp)
10939 {
10940 int err;
10941 u16 pci_cmd;
10942
10943 if (!tg3_flag(tp, USING_MSI))
10944 return 0;
10945
10946 /* Turn off SERR reporting in case MSI terminates with Master
10947 * Abort.
10948 */
10949 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10950 pci_write_config_word(tp->pdev, PCI_COMMAND,
10951 pci_cmd & ~PCI_COMMAND_SERR);
10952
10953 err = tg3_test_interrupt(tp);
10954
10955 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10956
10957 if (!err)
10958 return 0;
10959
10960 /* other failures */
10961 if (err != -EIO)
10962 return err;
10963
10964 /* MSI test failed, go back to INTx mode */
10965 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10966 "to INTx mode. Please report this failure to the PCI "
10967 "maintainer and include system chipset information\n");
10968
10969 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10970
10971 pci_disable_msi(tp->pdev);
10972
10973 tg3_flag_clear(tp, USING_MSI);
10974 tp->napi[0].irq_vec = tp->pdev->irq;
10975
10976 err = tg3_request_irq(tp, 0);
10977 if (err)
10978 return err;
10979
10980 /* Need to reset the chip because the MSI cycle may have terminated
10981 * with Master Abort.
10982 */
10983 tg3_full_lock(tp, 1);
10984
10985 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10986 err = tg3_init_hw(tp, true);
10987
10988 tg3_full_unlock(tp);
10989
10990 if (err)
10991 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10992
10993 return err;
10994 }
10995
10996 static int tg3_request_firmware(struct tg3 *tp)
10997 {
10998 const struct tg3_firmware_hdr *fw_hdr;
10999
11000 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11001 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11002 tp->fw_needed);
11003 return -ENOENT;
11004 }
11005
11006 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11007
11008 /* Firmware blob starts with version numbers, followed by
11009 * start address and _full_ length including BSS sections
11010 * (which must be longer than the actual data, of course).
11011 */
11012
11013 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
11014 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11015 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11016 tp->fw_len, tp->fw_needed);
11017 release_firmware(tp->fw);
11018 tp->fw = NULL;
11019 return -EINVAL;
11020 }
11021
11022 /* We no longer need firmware; we have it. */
11023 tp->fw_needed = NULL;
11024 return 0;
11025 }
11026
11027 static u32 tg3_irq_count(struct tg3 *tp)
11028 {
11029 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11030
11031 if (irq_cnt > 1) {
11032 /* We want as many rx rings enabled as there are cpus.
11033 * In multiqueue MSI-X mode, the first MSI-X vector
11034 * only deals with link interrupts, etc, so we add
11035 * one to the number of vectors we are requesting.
11036 */
11037 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11038 }
11039
11040 return irq_cnt;
11041 }
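/* Worked example: with rxq_cnt = 4 and txq_cnt = 1, irq_cnt becomes
 * min(4 + 1, irq_max) -- four rx vectors plus the extra vector 0 that
 * only services link interrupts in multiqueue MSI-X mode.
 */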
11042
11043 static bool tg3_enable_msix(struct tg3 *tp)
11044 {
11045 int i, rc;
11046 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11047
11048 tp->txq_cnt = tp->txq_req;
11049 tp->rxq_cnt = tp->rxq_req;
11050 if (!tp->rxq_cnt)
11051 tp->rxq_cnt = netif_get_num_default_rss_queues();
11052 if (tp->rxq_cnt > tp->rxq_max)
11053 tp->rxq_cnt = tp->rxq_max;
11054
11055 /* Disable multiple TX rings by default. Simple round-robin hardware
11056 * scheduling of the TX rings can cause starvation of rings with
11057 * small packets when other rings have TSO or jumbo packets.
11058 */
11059 if (!tp->txq_req)
11060 tp->txq_cnt = 1;
11061
11062 tp->irq_cnt = tg3_irq_count(tp);
11063
11064 for (i = 0; i < tp->irq_max; i++) {
11065 msix_ent[i].entry = i;
11066 msix_ent[i].vector = 0;
11067 }
11068
11069 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
11070 if (rc < 0) {
11071 return false;
11072 } else if (rc != 0) {
11073 if (pci_enable_msix(tp->pdev, msix_ent, rc))
11074 return false;
11075 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11076 tp->irq_cnt, rc);
11077 tp->irq_cnt = rc;
11078 tp->rxq_cnt = max(rc - 1, 1);
11079 if (tp->txq_cnt)
11080 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11081 }
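/* Note on the retry above: this kernel's pci_enable_msix() returns a
 * positive value to report how many vectors could actually be
 * allocated, so the request is retried with that smaller count and
 * irq_cnt/rxq_cnt/txq_cnt are shrunk to match.
 */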
11082
11083 for (i = 0; i < tp->irq_max; i++)
11084 tp->napi[i].irq_vec = msix_ent[i].vector;
11085
11086 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11087 pci_disable_msix(tp->pdev);
11088 return false;
11089 }
11090
11091 if (tp->irq_cnt == 1)
11092 return true;
11093
11094 tg3_flag_set(tp, ENABLE_RSS);
11095
11096 if (tp->txq_cnt > 1)
11097 tg3_flag_set(tp, ENABLE_TSS);
11098
11099 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11100
11101 return true;
11102 }
11103
11104 static void tg3_ints_init(struct tg3 *tp)
11105 {
11106 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11107 !tg3_flag(tp, TAGGED_STATUS)) {
11108 /* All MSI supporting chips should support tagged
11109 * status. Assert that this is the case.
11110 */
11111 netdev_warn(tp->dev,
11112 "MSI without TAGGED_STATUS? Not using MSI\n");
11113 goto defcfg;
11114 }
11115
11116 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11117 tg3_flag_set(tp, USING_MSIX);
11118 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11119 tg3_flag_set(tp, USING_MSI);
11120
11121 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11122 u32 msi_mode = tr32(MSGINT_MODE);
11123 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11124 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11125 if (!tg3_flag(tp, 1SHOT_MSI))
11126 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11127 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11128 }
11129 defcfg:
11130 if (!tg3_flag(tp, USING_MSIX)) {
11131 tp->irq_cnt = 1;
11132 tp->napi[0].irq_vec = tp->pdev->irq;
11133 }
11134
11135 if (tp->irq_cnt == 1) {
11136 tp->txq_cnt = 1;
11137 tp->rxq_cnt = 1;
11138 netif_set_real_num_tx_queues(tp->dev, 1);
11139 netif_set_real_num_rx_queues(tp->dev, 1);
11140 }
11141 }
11142
11143 static void tg3_ints_fini(struct tg3 *tp)
11144 {
11145 if (tg3_flag(tp, USING_MSIX))
11146 pci_disable_msix(tp->pdev);
11147 else if (tg3_flag(tp, USING_MSI))
11148 pci_disable_msi(tp->pdev);
11149 tg3_flag_clear(tp, USING_MSI);
11150 tg3_flag_clear(tp, USING_MSIX);
11151 tg3_flag_clear(tp, ENABLE_RSS);
11152 tg3_flag_clear(tp, ENABLE_TSS);
11153 }
11154
11155 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11156 bool init)
11157 {
11158 struct net_device *dev = tp->dev;
11159 int i, err;
11160
11161 /*
11162 * Setup interrupts first so we know how
11163 * many NAPI resources to allocate
11164 */
11165 tg3_ints_init(tp);
11166
11167 tg3_rss_check_indir_tbl(tp);
11168
11169 /* The placement of this call is tied
11170 * to the setup and use of Host TX descriptors.
11171 */
11172 err = tg3_alloc_consistent(tp);
11173 if (err)
11174 goto err_out1;
11175
11176 tg3_napi_init(tp);
11177
11178 tg3_napi_enable(tp);
11179
11180 for (i = 0; i < tp->irq_cnt; i++) {
11181 struct tg3_napi *tnapi = &tp->napi[i];
11182 err = tg3_request_irq(tp, i);
11183 if (err) {
11184 for (i--; i >= 0; i--) {
11185 tnapi = &tp->napi[i];
11186 free_irq(tnapi->irq_vec, tnapi);
11187 }
11188 goto err_out2;
11189 }
11190 }
11191
11192 tg3_full_lock(tp, 0);
11193
11194 err = tg3_init_hw(tp, reset_phy);
11195 if (err) {
11196 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11197 tg3_free_rings(tp);
11198 }
11199
11200 tg3_full_unlock(tp);
11201
11202 if (err)
11203 goto err_out3;
11204
11205 if (test_irq && tg3_flag(tp, USING_MSI)) {
11206 err = tg3_test_msi(tp);
11207
11208 if (err) {
11209 tg3_full_lock(tp, 0);
11210 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11211 tg3_free_rings(tp);
11212 tg3_full_unlock(tp);
11213
11214 goto err_out2;
11215 }
11216
11217 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11218 u32 val = tr32(PCIE_TRANSACTION_CFG);
11219
11220 tw32(PCIE_TRANSACTION_CFG,
11221 val | PCIE_TRANS_CFG_1SHOT_MSI);
11222 }
11223 }
11224
11225 tg3_phy_start(tp);
11226
11227 tg3_hwmon_open(tp);
11228
11229 tg3_full_lock(tp, 0);
11230
11231 tg3_timer_start(tp);
11232 tg3_flag_set(tp, INIT_COMPLETE);
11233 tg3_enable_ints(tp);
11234
11235 if (init)
11236 tg3_ptp_init(tp);
11237 else
11238 tg3_ptp_resume(tp);
11239
11240
11241 tg3_full_unlock(tp);
11242
11243 netif_tx_start_all_queues(dev);
11244
11245 /*
11246 * Reset the loopback feature if it was turned on while the
11247 * device was down; make sure that it's installed properly now.
11248 */
11249 if (dev->features & NETIF_F_LOOPBACK)
11250 tg3_set_loopback(dev, dev->features);
11251
11252 return 0;
11253
11254 err_out3:
11255 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11256 struct tg3_napi *tnapi = &tp->napi[i];
11257 free_irq(tnapi->irq_vec, tnapi);
11258 }
11259
11260 err_out2:
11261 tg3_napi_disable(tp);
11262 tg3_napi_fini(tp);
11263 tg3_free_consistent(tp);
11264
11265 err_out1:
11266 tg3_ints_fini(tp);
11267
11268 return err;
11269 }
11270
11271 static void tg3_stop(struct tg3 *tp)
11272 {
11273 int i;
11274
11275 tg3_reset_task_cancel(tp);
11276 tg3_netif_stop(tp);
11277
11278 tg3_timer_stop(tp);
11279
11280 tg3_hwmon_close(tp);
11281
11282 tg3_phy_stop(tp);
11283
11284 tg3_full_lock(tp, 1);
11285
11286 tg3_disable_ints(tp);
11287
11288 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11289 tg3_free_rings(tp);
11290 tg3_flag_clear(tp, INIT_COMPLETE);
11291
11292 tg3_full_unlock(tp);
11293
11294 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11295 struct tg3_napi *tnapi = &tp->napi[i];
11296 free_irq(tnapi->irq_vec, tnapi);
11297 }
11298
11299 tg3_ints_fini(tp);
11300
11301 tg3_napi_fini(tp);
11302
11303 tg3_free_consistent(tp);
11304 }
11305
11306 static int tg3_open(struct net_device *dev)
11307 {
11308 struct tg3 *tp = netdev_priv(dev);
11309 int err;
11310
11311 if (tp->fw_needed) {
11312 err = tg3_request_firmware(tp);
11313 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11314 if (err) {
11315 netdev_warn(tp->dev, "EEE capability disabled\n");
11316 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11317 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11318 netdev_warn(tp->dev, "EEE capability restored\n");
11319 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11320 }
11321 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11322 if (err)
11323 return err;
11324 } else if (err) {
11325 netdev_warn(tp->dev, "TSO capability disabled\n");
11326 tg3_flag_clear(tp, TSO_CAPABLE);
11327 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11328 netdev_notice(tp->dev, "TSO capability restored\n");
11329 tg3_flag_set(tp, TSO_CAPABLE);
11330 }
11331 }
11332
11333 tg3_carrier_off(tp);
11334
11335 err = tg3_power_up(tp);
11336 if (err)
11337 return err;
11338
11339 tg3_full_lock(tp, 0);
11340
11341 tg3_disable_ints(tp);
11342 tg3_flag_clear(tp, INIT_COMPLETE);
11343
11344 tg3_full_unlock(tp);
11345
11346 err = tg3_start(tp,
11347 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11348 true, true);
11349 if (err) {
11350 tg3_frob_aux_power(tp, false);
11351 pci_set_power_state(tp->pdev, PCI_D3hot);
11352 }
11353
11354 if (tg3_flag(tp, PTP_CAPABLE)) {
11355 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
11356 &tp->pdev->dev);
11357 if (IS_ERR(tp->ptp_clock))
11358 tp->ptp_clock = NULL;
11359 }
11360
11361 return err;
11362 }
11363
11364 static int tg3_close(struct net_device *dev)
11365 {
11366 struct tg3 *tp = netdev_priv(dev);
11367
11368 tg3_ptp_fini(tp);
11369
11370 tg3_stop(tp);
11371
11372 /* Clear stats across close / open calls */
11373 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11374 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11375
11376 tg3_power_down(tp);
11377
11378 tg3_carrier_off(tp);
11379
11380 return 0;
11381 }
11382
11383 static inline u64 get_stat64(tg3_stat64_t *val)
11384 {
11385 return ((u64)val->high << 32) | ((u64)val->low);
11386 }
11387
11388 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11389 {
11390 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11391
11392 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11393 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11394 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11395 u32 val;
11396
11397 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11398 tg3_writephy(tp, MII_TG3_TEST1,
11399 val | MII_TG3_TEST1_CRC_EN);
11400 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11401 } else
11402 val = 0;
11403
11404 tp->phy_crc_errors += val;
11405
11406 return tp->phy_crc_errors;
11407 }
11408
11409 return get_stat64(&hw_stats->rx_fcs_errors);
11410 }
11411
11412 #define ESTAT_ADD(member) \
11413 estats->member = old_estats->member + \
11414 get_stat64(&hw_stats->member)
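/* For example, ESTAT_ADD(rx_octets) expands to
 *
 * estats->rx_octets = old_estats->rx_octets +
 * get_stat64(&hw_stats->rx_octets);
 *
 * i.e. each cumulative ethtool stat is the saved pre-reset total plus
 * the live hardware counter.
 */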
11415
11416 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11417 {
11418 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11419 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11420
11421 ESTAT_ADD(rx_octets);
11422 ESTAT_ADD(rx_fragments);
11423 ESTAT_ADD(rx_ucast_packets);
11424 ESTAT_ADD(rx_mcast_packets);
11425 ESTAT_ADD(rx_bcast_packets);
11426 ESTAT_ADD(rx_fcs_errors);
11427 ESTAT_ADD(rx_align_errors);
11428 ESTAT_ADD(rx_xon_pause_rcvd);
11429 ESTAT_ADD(rx_xoff_pause_rcvd);
11430 ESTAT_ADD(rx_mac_ctrl_rcvd);
11431 ESTAT_ADD(rx_xoff_entered);
11432 ESTAT_ADD(rx_frame_too_long_errors);
11433 ESTAT_ADD(rx_jabbers);
11434 ESTAT_ADD(rx_undersize_packets);
11435 ESTAT_ADD(rx_in_length_errors);
11436 ESTAT_ADD(rx_out_length_errors);
11437 ESTAT_ADD(rx_64_or_less_octet_packets);
11438 ESTAT_ADD(rx_65_to_127_octet_packets);
11439 ESTAT_ADD(rx_128_to_255_octet_packets);
11440 ESTAT_ADD(rx_256_to_511_octet_packets);
11441 ESTAT_ADD(rx_512_to_1023_octet_packets);
11442 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11443 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11444 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11445 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11446 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11447
11448 ESTAT_ADD(tx_octets);
11449 ESTAT_ADD(tx_collisions);
11450 ESTAT_ADD(tx_xon_sent);
11451 ESTAT_ADD(tx_xoff_sent);
11452 ESTAT_ADD(tx_flow_control);
11453 ESTAT_ADD(tx_mac_errors);
11454 ESTAT_ADD(tx_single_collisions);
11455 ESTAT_ADD(tx_mult_collisions);
11456 ESTAT_ADD(tx_deferred);
11457 ESTAT_ADD(tx_excessive_collisions);
11458 ESTAT_ADD(tx_late_collisions);
11459 ESTAT_ADD(tx_collide_2times);
11460 ESTAT_ADD(tx_collide_3times);
11461 ESTAT_ADD(tx_collide_4times);
11462 ESTAT_ADD(tx_collide_5times);
11463 ESTAT_ADD(tx_collide_6times);
11464 ESTAT_ADD(tx_collide_7times);
11465 ESTAT_ADD(tx_collide_8times);
11466 ESTAT_ADD(tx_collide_9times);
11467 ESTAT_ADD(tx_collide_10times);
11468 ESTAT_ADD(tx_collide_11times);
11469 ESTAT_ADD(tx_collide_12times);
11470 ESTAT_ADD(tx_collide_13times);
11471 ESTAT_ADD(tx_collide_14times);
11472 ESTAT_ADD(tx_collide_15times);
11473 ESTAT_ADD(tx_ucast_packets);
11474 ESTAT_ADD(tx_mcast_packets);
11475 ESTAT_ADD(tx_bcast_packets);
11476 ESTAT_ADD(tx_carrier_sense_errors);
11477 ESTAT_ADD(tx_discards);
11478 ESTAT_ADD(tx_errors);
11479
11480 ESTAT_ADD(dma_writeq_full);
11481 ESTAT_ADD(dma_write_prioq_full);
11482 ESTAT_ADD(rxbds_empty);
11483 ESTAT_ADD(rx_discards);
11484 ESTAT_ADD(rx_errors);
11485 ESTAT_ADD(rx_threshold_hit);
11486
11487 ESTAT_ADD(dma_readq_full);
11488 ESTAT_ADD(dma_read_prioq_full);
11489 ESTAT_ADD(tx_comp_queue_full);
11490
11491 ESTAT_ADD(ring_set_send_prod_index);
11492 ESTAT_ADD(ring_status_update);
11493 ESTAT_ADD(nic_irqs);
11494 ESTAT_ADD(nic_avoided_irqs);
11495 ESTAT_ADD(nic_tx_threshold_hit);
11496
11497 ESTAT_ADD(mbuf_lwm_thresh_hit);
11498 }
11499
11500 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11501 {
11502 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11503 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11504
11505 stats->rx_packets = old_stats->rx_packets +
11506 get_stat64(&hw_stats->rx_ucast_packets) +
11507 get_stat64(&hw_stats->rx_mcast_packets) +
11508 get_stat64(&hw_stats->rx_bcast_packets);
11509
11510 stats->tx_packets = old_stats->tx_packets +
11511 get_stat64(&hw_stats->tx_ucast_packets) +
11512 get_stat64(&hw_stats->tx_mcast_packets) +
11513 get_stat64(&hw_stats->tx_bcast_packets);
11514
11515 stats->rx_bytes = old_stats->rx_bytes +
11516 get_stat64(&hw_stats->rx_octets);
11517 stats->tx_bytes = old_stats->tx_bytes +
11518 get_stat64(&hw_stats->tx_octets);
11519
11520 stats->rx_errors = old_stats->rx_errors +
11521 get_stat64(&hw_stats->rx_errors);
11522 stats->tx_errors = old_stats->tx_errors +
11523 get_stat64(&hw_stats->tx_errors) +
11524 get_stat64(&hw_stats->tx_mac_errors) +
11525 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11526 get_stat64(&hw_stats->tx_discards);
11527
11528 stats->multicast = old_stats->multicast +
11529 get_stat64(&hw_stats->rx_mcast_packets);
11530 stats->collisions = old_stats->collisions +
11531 get_stat64(&hw_stats->tx_collisions);
11532
11533 stats->rx_length_errors = old_stats->rx_length_errors +
11534 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11535 get_stat64(&hw_stats->rx_undersize_packets);
11536
11537 stats->rx_over_errors = old_stats->rx_over_errors +
11538 get_stat64(&hw_stats->rxbds_empty);
11539 stats->rx_frame_errors = old_stats->rx_frame_errors +
11540 get_stat64(&hw_stats->rx_align_errors);
11541 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11542 get_stat64(&hw_stats->tx_discards);
11543 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11544 get_stat64(&hw_stats->tx_carrier_sense_errors);
11545
11546 stats->rx_crc_errors = old_stats->rx_crc_errors +
11547 tg3_calc_crc_errors(tp);
11548
11549 stats->rx_missed_errors = old_stats->rx_missed_errors +
11550 get_stat64(&hw_stats->rx_discards);
11551
11552 stats->rx_dropped = tp->rx_dropped;
11553 stats->tx_dropped = tp->tx_dropped;
11554 }
11555
11556 static int tg3_get_regs_len(struct net_device *dev)
11557 {
11558 return TG3_REG_BLK_SIZE;
11559 }
11560
11561 static void tg3_get_regs(struct net_device *dev,
11562 struct ethtool_regs *regs, void *_p)
11563 {
11564 struct tg3 *tp = netdev_priv(dev);
11565
11566 regs->version = 0;
11567
11568 memset(_p, 0, TG3_REG_BLK_SIZE);
11569
11570 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11571 return;
11572
11573 tg3_full_lock(tp, 0);
11574
11575 tg3_dump_legacy_regs(tp, (u32 *)_p);
11576
11577 tg3_full_unlock(tp);
11578 }
11579
11580 static int tg3_get_eeprom_len(struct net_device *dev)
11581 {
11582 struct tg3 *tp = netdev_priv(dev);
11583
11584 return tp->nvram_size;
11585 }
11586
11587 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11588 {
11589 struct tg3 *tp = netdev_priv(dev);
11590 int ret;
11591 u8 *pd;
11592 u32 i, offset, len, b_offset, b_count;
11593 __be32 val;
11594
11595 if (tg3_flag(tp, NO_NVRAM))
11596 return -EINVAL;
11597
11598 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11599 return -EAGAIN;
11600
11601 offset = eeprom->offset;
11602 len = eeprom->len;
11603 eeprom->len = 0;
11604
11605 eeprom->magic = TG3_EEPROM_MAGIC;
11606
11607 if (offset & 3) {
11608 /* adjustments to start on required 4 byte boundary */
11609 b_offset = offset & 3;
11610 b_count = 4 - b_offset;
11611 if (b_count > len) {
11612 /* i.e. offset=1 len=2 */
11613 b_count = len;
11614 }
11615 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11616 if (ret)
11617 return ret;
11618 memcpy(data, ((char *)&val) + b_offset, b_count);
11619 len -= b_count;
11620 offset += b_count;
11621 eeprom->len += b_count;
11622 }
11623
11624 /* read bytes up to the last 4 byte boundary */
11625 pd = &data[eeprom->len];
11626 for (i = 0; i < (len - (len & 3)); i += 4) {
11627 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11628 if (ret) {
11629 eeprom->len += i;
11630 return ret;
11631 }
11632 memcpy(pd + i, &val, 4);
11633 }
11634 eeprom->len += i;
11635
11636 if (len & 3) {
11637 /* read last bytes not ending on 4 byte boundary */
11638 pd = &data[eeprom->len];
11639 b_count = len & 3;
11640 b_offset = offset + len - b_count;
11641 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11642 if (ret)
11643 return ret;
11644 memcpy(pd, &val, b_count);
11645 eeprom->len += b_count;
11646 }
11647 return 0;
11648 }
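/* Worked example for the unaligned paths above: offset = 5, len = 10
 * (bytes 5..14). The leading fixup reads the word at offset 4 and
 * copies its last three bytes (5..7); the aligned loop reads one whole
 * word at offset 8 (bytes 8..11); the trailing fixup reads the word at
 * offset 12 and copies its first three bytes (12..14).
 */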
11649
11650 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11651 {
11652 struct tg3 *tp = netdev_priv(dev);
11653 int ret;
11654 u32 offset, len, b_offset, odd_len;
11655 u8 *buf;
11656 __be32 start, end;
11657
11658 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11659 return -EAGAIN;
11660
11661 if (tg3_flag(tp, NO_NVRAM) ||
11662 eeprom->magic != TG3_EEPROM_MAGIC)
11663 return -EINVAL;
11664
11665 offset = eeprom->offset;
11666 len = eeprom->len;
11667
11668 if ((b_offset = (offset & 3))) {
11669 /* adjustments to start on required 4 byte boundary */
11670 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11671 if (ret)
11672 return ret;
11673 len += b_offset;
11674 offset &= ~3;
11675 if (len < 4)
11676 len = 4;
11677 }
11678
11679 odd_len = 0;
11680 if (len & 3) {
11681 /* adjustments to end on required 4 byte boundary */
11682 odd_len = 1;
11683 len = (len + 3) & ~3;
11684 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11685 if (ret)
11686 return ret;
11687 }
11688
11689 buf = data;
11690 if (b_offset || odd_len) {
11691 buf = kmalloc(len, GFP_KERNEL);
11692 if (!buf)
11693 return -ENOMEM;
11694 if (b_offset)
11695 memcpy(buf, &start, 4);
11696 if (odd_len)
11697 memcpy(buf+len-4, &end, 4);
11698 memcpy(buf + b_offset, data, eeprom->len);
11699 }
11700
11701 ret = tg3_nvram_write_block(tp, offset, len, buf);
11702
11703 if (buf != data)
11704 kfree(buf);
11705
11706 return ret;
11707 }
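/* The write path mirrors tg3_get_eeprom() above: an unaligned start or
 * length is widened to whole 4-byte words by first reading the
 * bordering word(s) into 'start'/'end', splicing the caller's bytes
 * over them in a scratch buffer, and writing the padded block back --
 * a read-modify-write on the word boundaries.
 */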
11708
11709 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11710 {
11711 struct tg3 *tp = netdev_priv(dev);
11712
11713 if (tg3_flag(tp, USE_PHYLIB)) {
11714 struct phy_device *phydev;
11715 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11716 return -EAGAIN;
11717 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11718 return phy_ethtool_gset(phydev, cmd);
11719 }
11720
11721 cmd->supported = (SUPPORTED_Autoneg);
11722
11723 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11724 cmd->supported |= (SUPPORTED_1000baseT_Half |
11725 SUPPORTED_1000baseT_Full);
11726
11727 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11728 cmd->supported |= (SUPPORTED_100baseT_Half |
11729 SUPPORTED_100baseT_Full |
11730 SUPPORTED_10baseT_Half |
11731 SUPPORTED_10baseT_Full |
11732 SUPPORTED_TP);
11733 cmd->port = PORT_TP;
11734 } else {
11735 cmd->supported |= SUPPORTED_FIBRE;
11736 cmd->port = PORT_FIBRE;
11737 }
11738
11739 cmd->advertising = tp->link_config.advertising;
11740 if (tg3_flag(tp, PAUSE_AUTONEG)) {
11741 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11742 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11743 cmd->advertising |= ADVERTISED_Pause;
11744 } else {
11745 cmd->advertising |= ADVERTISED_Pause |
11746 ADVERTISED_Asym_Pause;
11747 }
11748 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11749 cmd->advertising |= ADVERTISED_Asym_Pause;
11750 }
11751 }
11752 if (netif_running(dev) && tp->link_up) {
11753 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11754 cmd->duplex = tp->link_config.active_duplex;
11755 cmd->lp_advertising = tp->link_config.rmt_adv;
11756 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11757 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11758 cmd->eth_tp_mdix = ETH_TP_MDI_X;
11759 else
11760 cmd->eth_tp_mdix = ETH_TP_MDI;
11761 }
11762 } else {
11763 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11764 cmd->duplex = DUPLEX_UNKNOWN;
11765 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11766 }
11767 cmd->phy_address = tp->phy_addr;
11768 cmd->transceiver = XCVR_INTERNAL;
11769 cmd->autoneg = tp->link_config.autoneg;
11770 cmd->maxtxpkt = 0;
11771 cmd->maxrxpkt = 0;
11772 return 0;
11773 }
11774
11775 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11776 {
11777 struct tg3 *tp = netdev_priv(dev);
11778 u32 speed = ethtool_cmd_speed(cmd);
11779
11780 if (tg3_flag(tp, USE_PHYLIB)) {
11781 struct phy_device *phydev;
11782 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11783 return -EAGAIN;
11784 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11785 return phy_ethtool_sset(phydev, cmd);
11786 }
11787
11788 if (cmd->autoneg != AUTONEG_ENABLE &&
11789 cmd->autoneg != AUTONEG_DISABLE)
11790 return -EINVAL;
11791
11792 if (cmd->autoneg == AUTONEG_DISABLE &&
11793 cmd->duplex != DUPLEX_FULL &&
11794 cmd->duplex != DUPLEX_HALF)
11795 return -EINVAL;
11796
11797 if (cmd->autoneg == AUTONEG_ENABLE) {
11798 u32 mask = ADVERTISED_Autoneg |
11799 ADVERTISED_Pause |
11800 ADVERTISED_Asym_Pause;
11801
11802 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11803 mask |= ADVERTISED_1000baseT_Half |
11804 ADVERTISED_1000baseT_Full;
11805
11806 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11807 mask |= ADVERTISED_100baseT_Half |
11808 ADVERTISED_100baseT_Full |
11809 ADVERTISED_10baseT_Half |
11810 ADVERTISED_10baseT_Full |
11811 ADVERTISED_TP;
11812 else
11813 mask |= ADVERTISED_FIBRE;
11814
11815 if (cmd->advertising & ~mask)
11816 return -EINVAL;
11817
11818 mask &= (ADVERTISED_1000baseT_Half |
11819 ADVERTISED_1000baseT_Full |
11820 ADVERTISED_100baseT_Half |
11821 ADVERTISED_100baseT_Full |
11822 ADVERTISED_10baseT_Half |
11823 ADVERTISED_10baseT_Full);
11824
11825 cmd->advertising &= mask;
11826 } else {
11827 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
11828 if (speed != SPEED_1000)
11829 return -EINVAL;
11830
11831 if (cmd->duplex != DUPLEX_FULL)
11832 return -EINVAL;
11833 } else {
11834 if (speed != SPEED_100 &&
11835 speed != SPEED_10)
11836 return -EINVAL;
11837 }
11838 }
11839
11840 tg3_full_lock(tp, 0);
11841
11842 tp->link_config.autoneg = cmd->autoneg;
11843 if (cmd->autoneg == AUTONEG_ENABLE) {
11844 tp->link_config.advertising = (cmd->advertising |
11845 ADVERTISED_Autoneg);
11846 tp->link_config.speed = SPEED_UNKNOWN;
11847 tp->link_config.duplex = DUPLEX_UNKNOWN;
11848 } else {
11849 tp->link_config.advertising = 0;
11850 tp->link_config.speed = speed;
11851 tp->link_config.duplex = cmd->duplex;
11852 }
11853
11854 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
11855
11856 tg3_warn_mgmt_link_flap(tp);
11857
11858 if (netif_running(dev))
11859 tg3_setup_phy(tp, true);
11860
11861 tg3_full_unlock(tp);
11862
11863 return 0;
11864 }
11865
11866 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11867 {
11868 struct tg3 *tp = netdev_priv(dev);
11869
11870 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11871 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11872 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11873 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11874 }
11875
11876 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11877 {
11878 struct tg3 *tp = netdev_priv(dev);
11879
11880 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11881 wol->supported = WAKE_MAGIC;
11882 else
11883 wol->supported = 0;
11884 wol->wolopts = 0;
11885 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11886 wol->wolopts = WAKE_MAGIC;
11887 memset(&wol->sopass, 0, sizeof(wol->sopass));
11888 }
11889
11890 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11891 {
11892 struct tg3 *tp = netdev_priv(dev);
11893 struct device *dp = &tp->pdev->dev;
11894
11895 if (wol->wolopts & ~WAKE_MAGIC)
11896 return -EINVAL;
11897 if ((wol->wolopts & WAKE_MAGIC) &&
11898 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11899 return -EINVAL;
11900
11901 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11902
11903 spin_lock_bh(&tp->lock);
11904 if (device_may_wakeup(dp))
11905 tg3_flag_set(tp, WOL_ENABLE);
11906 else
11907 tg3_flag_clear(tp, WOL_ENABLE);
11908 spin_unlock_bh(&tp->lock);
11909
11910 return 0;
11911 }
11912
11913 static u32 tg3_get_msglevel(struct net_device *dev)
11914 {
11915 struct tg3 *tp = netdev_priv(dev);
11916 return tp->msg_enable;
11917 }
11918
11919 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11920 {
11921 struct tg3 *tp = netdev_priv(dev);
11922 tp->msg_enable = value;
11923 }
11924
11925 static int tg3_nway_reset(struct net_device *dev)
11926 {
11927 struct tg3 *tp = netdev_priv(dev);
11928 int r;
11929
11930 if (!netif_running(dev))
11931 return -EAGAIN;
11932
11933 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11934 return -EINVAL;
11935
11936 tg3_warn_mgmt_link_flap(tp);
11937
11938 if (tg3_flag(tp, USE_PHYLIB)) {
11939 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11940 return -EAGAIN;
11941 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
11942 } else {
11943 u32 bmcr;
11944
11945 spin_lock_bh(&tp->lock);
11946 r = -EINVAL;
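/* The BMCR is deliberately read twice; the first result is
 * discarded, likely to flush a stale value on some PHYs, and only
 * the second, checked read is trusted.
 */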
11947 tg3_readphy(tp, MII_BMCR, &bmcr);
11948 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
11949 ((bmcr & BMCR_ANENABLE) ||
11950 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
11951 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
11952 BMCR_ANENABLE);
11953 r = 0;
11954 }
11955 spin_unlock_bh(&tp->lock);
11956 }
11957
11958 return r;
11959 }
11960
11961 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11962 {
11963 struct tg3 *tp = netdev_priv(dev);
11964
11965 ering->rx_max_pending = tp->rx_std_ring_mask;
11966 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11967 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11968 else
11969 ering->rx_jumbo_max_pending = 0;
11970
11971 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11972
11973 ering->rx_pending = tp->rx_pending;
11974 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11975 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11976 else
11977 ering->rx_jumbo_pending = 0;
11978
11979 ering->tx_pending = tp->napi[0].tx_pending;
11980 }
11981
11982 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11983 {
11984 struct tg3 *tp = netdev_priv(dev);
11985 int i, irq_sync = 0, err = 0;
11986
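/* Bounds-check the request: tx_pending must exceed MAX_SKB_FRAGS so
 * one maximally fragmented skb always fits in the ring, with 3x that
 * headroom on TSO_BUG parts, presumably to cover the driver's
 * software-segmentation fallback.
 */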
11987 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
11988 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
11989 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11990 (ering->tx_pending <= MAX_SKB_FRAGS) ||
11991 (tg3_flag(tp, TSO_BUG) &&
11992 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
11993 return -EINVAL;
11994
11995 if (netif_running(dev)) {
11996 tg3_phy_stop(tp);
11997 tg3_netif_stop(tp);
11998 irq_sync = 1;
11999 }
12000
12001 tg3_full_lock(tp, irq_sync);
12002
12003 tp->rx_pending = ering->rx_pending;
12004
12005 if (tg3_flag(tp, MAX_RXPEND_64) &&
12006 tp->rx_pending > 63)
12007 tp->rx_pending = 63;
12008 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12009
12010 for (i = 0; i < tp->irq_max; i++)
12011 tp->napi[i].tx_pending = ering->tx_pending;
12012
12013 if (netif_running(dev)) {
12014 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12015 err = tg3_restart_hw(tp, false);
12016 if (!err)
12017 tg3_netif_start(tp);
12018 }
12019
12020 tg3_full_unlock(tp);
12021
12022 if (irq_sync && !err)
12023 tg3_phy_start(tp);
12024
12025 return err;
12026 }
12027
12028 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12029 {
12030 struct tg3 *tp = netdev_priv(dev);
12031
12032 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12033
12034 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12035 epause->rx_pause = 1;
12036 else
12037 epause->rx_pause = 0;
12038
12039 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12040 epause->tx_pause = 1;
12041 else
12042 epause->tx_pause = 0;
12043 }
12044
12045 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12046 {
12047 struct tg3 *tp = netdev_priv(dev);
12048 int err = 0;
12049
12050 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12051 tg3_warn_mgmt_link_flap(tp);
12052
12053 if (tg3_flag(tp, USE_PHYLIB)) {
12054 u32 newadv;
12055 struct phy_device *phydev;
12056
12057 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12058
12059 if (!(phydev->supported & SUPPORTED_Pause) ||
12060 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12061 (epause->rx_pause != epause->tx_pause)))
12062 return -EINVAL;
12063
12064 tp->link_config.flowctrl = 0;
12065 if (epause->rx_pause) {
12066 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12067
12068 if (epause->tx_pause) {
12069 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12070 newadv = ADVERTISED_Pause;
12071 } else
12072 newadv = ADVERTISED_Pause |
12073 ADVERTISED_Asym_Pause;
12074 } else if (epause->tx_pause) {
12075 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12076 newadv = ADVERTISED_Asym_Pause;
12077 } else
12078 newadv = 0;
12079
12080 if (epause->autoneg)
12081 tg3_flag_set(tp, PAUSE_AUTONEG);
12082 else
12083 tg3_flag_clear(tp, PAUSE_AUTONEG);
12084
12085 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12086 u32 oldadv = phydev->advertising &
12087 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12088 if (oldadv != newadv) {
12089 phydev->advertising &=
12090 ~(ADVERTISED_Pause |
12091 ADVERTISED_Asym_Pause);
12092 phydev->advertising |= newadv;
12093 if (phydev->autoneg) {
12094 /*
12095 * Always renegotiate the link to
12096 * inform our link partner of our
12097 * flow control settings, even if the
12098 * flow control is forced. Let
12099 * tg3_adjust_link() do the final
12100 * flow control setup.
12101 */
12102 return phy_start_aneg(phydev);
12103 }
12104 }
12105
12106 if (!epause->autoneg)
12107 tg3_setup_flow_control(tp, 0, 0);
12108 } else {
12109 tp->link_config.advertising &=
12110 ~(ADVERTISED_Pause |
12111 ADVERTISED_Asym_Pause);
12112 tp->link_config.advertising |= newadv;
12113 }
12114 } else {
12115 int irq_sync = 0;
12116
12117 if (netif_running(dev)) {
12118 tg3_netif_stop(tp);
12119 irq_sync = 1;
12120 }
12121
12122 tg3_full_lock(tp, irq_sync);
12123
12124 if (epause->autoneg)
12125 tg3_flag_set(tp, PAUSE_AUTONEG);
12126 else
12127 tg3_flag_clear(tp, PAUSE_AUTONEG);
12128 if (epause->rx_pause)
12129 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12130 else
12131 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12132 if (epause->tx_pause)
12133 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12134 else
12135 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12136
12137 if (netif_running(dev)) {
12138 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12139 err = tg3_restart_hw(tp, false);
12140 if (!err)
12141 tg3_netif_start(tp);
12142 }
12143
12144 tg3_full_unlock(tp);
12145 }
12146
12147 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12148
12149 return err;
12150 }
12151
12152 static int tg3_get_sset_count(struct net_device *dev, int sset)
12153 {
12154 switch (sset) {
12155 case ETH_SS_TEST:
12156 return TG3_NUM_TEST;
12157 case ETH_SS_STATS:
12158 return TG3_NUM_STATS;
12159 default:
12160 return -EOPNOTSUPP;
12161 }
12162 }
12163
12164 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12165 u32 *rules __always_unused)
12166 {
12167 struct tg3 *tp = netdev_priv(dev);
12168
12169 if (!tg3_flag(tp, SUPPORT_MSIX))
12170 return -EOPNOTSUPP;
12171
12172 switch (info->cmd) {
12173 case ETHTOOL_GRXRINGS:
12174 if (netif_running(tp->dev))
12175 info->data = tp->rxq_cnt;
12176 else {
12177 info->data = num_online_cpus();
12178 if (info->data > TG3_RSS_MAX_NUM_QS)
12179 info->data = TG3_RSS_MAX_NUM_QS;
12180 }
12181
12182 /* The first interrupt vector only
12183 * handles link interrupts.
12184 */
12185 info->data -= 1;
12186 return 0;
12187
12188 default:
12189 return -EOPNOTSUPP;
12190 }
12191 }
12192
12193 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12194 {
12195 u32 size = 0;
12196 struct tg3 *tp = netdev_priv(dev);
12197
12198 if (tg3_flag(tp, SUPPORT_MSIX))
12199 size = TG3_RSS_INDIR_TBL_SIZE;
12200
12201 return size;
12202 }
12203
12204 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
12205 {
12206 struct tg3 *tp = netdev_priv(dev);
12207 int i;
12208
12209 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12210 indir[i] = tp->rss_ind_tbl[i];
12211
12212 return 0;
12213 }
12214
12215 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
12216 {
12217 struct tg3 *tp = netdev_priv(dev);
12218 size_t i;
12219
12220 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12221 tp->rss_ind_tbl[i] = indir[i];
12222
12223 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12224 return 0;
12225
12226 /* It is legal to write the indirection
12227 * table while the device is running.
12228 */
12229 tg3_full_lock(tp, 0);
12230 tg3_rss_write_indir_tbl(tp);
12231 tg3_full_unlock(tp);
12232
12233 return 0;
12234 }
12235
12236 static void tg3_get_channels(struct net_device *dev,
12237 struct ethtool_channels *channel)
12238 {
12239 struct tg3 *tp = netdev_priv(dev);
12240 u32 deflt_qs = netif_get_num_default_rss_queues();
12241
12242 channel->max_rx = tp->rxq_max;
12243 channel->max_tx = tp->txq_max;
12244
12245 if (netif_running(dev)) {
12246 channel->rx_count = tp->rxq_cnt;
12247 channel->tx_count = tp->txq_cnt;
12248 } else {
12249 if (tp->rxq_req)
12250 channel->rx_count = tp->rxq_req;
12251 else
12252 channel->rx_count = min(deflt_qs, tp->rxq_max);
12253
12254 if (tp->txq_req)
12255 channel->tx_count = tp->txq_req;
12256 else
12257 channel->tx_count = min(deflt_qs, tp->txq_max);
12258 }
12259 }
12260
12261 static int tg3_set_channels(struct net_device *dev,
12262 struct ethtool_channels *channel)
12263 {
12264 struct tg3 *tp = netdev_priv(dev);
12265
12266 if (!tg3_flag(tp, SUPPORT_MSIX))
12267 return -EOPNOTSUPP;
12268
12269 if (channel->rx_count > tp->rxq_max ||
12270 channel->tx_count > tp->txq_max)
12271 return -EINVAL;
12272
12273 tp->rxq_req = channel->rx_count;
12274 tp->txq_req = channel->tx_count;
12275
12276 if (!netif_running(dev))
12277 return 0;
12278
12279 tg3_stop(tp);
12280
12281 tg3_carrier_off(tp);
12282
12283 tg3_start(tp, true, false, false);
12284
12285 return 0;
12286 }
12287
12288 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12289 {
12290 switch (stringset) {
12291 case ETH_SS_STATS:
12292 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12293 break;
12294 case ETH_SS_TEST:
12295 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12296 break;
12297 default:
12298 WARN_ON(1); /* unknown stringset - should never happen */
12299 break;
12300 }
12301 }
12302
12303 static int tg3_set_phys_id(struct net_device *dev,
12304 enum ethtool_phys_id_state state)
12305 {
12306 struct tg3 *tp = netdev_priv(dev);
12307
12308 if (!netif_running(tp->dev))
12309 return -EAGAIN;
12310
12311 switch (state) {
12312 case ETHTOOL_ID_ACTIVE:
12313 return 1; /* cycle on/off once per second */
12314
12315 case ETHTOOL_ID_ON:
12316 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12317 LED_CTRL_1000MBPS_ON |
12318 LED_CTRL_100MBPS_ON |
12319 LED_CTRL_10MBPS_ON |
12320 LED_CTRL_TRAFFIC_OVERRIDE |
12321 LED_CTRL_TRAFFIC_BLINK |
12322 LED_CTRL_TRAFFIC_LED);
12323 break;
12324
12325 case ETHTOOL_ID_OFF:
12326 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12327 LED_CTRL_TRAFFIC_OVERRIDE);
12328 break;
12329
12330 case ETHTOOL_ID_INACTIVE:
12331 tw32(MAC_LED_CTRL, tp->led_ctrl);
12332 break;
12333 }
12334
12335 return 0;
12336 }
12337
12338 static void tg3_get_ethtool_stats(struct net_device *dev,
12339 struct ethtool_stats *estats, u64 *tmp_stats)
12340 {
12341 struct tg3 *tp = netdev_priv(dev);
12342
12343 if (tp->hw_stats)
12344 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12345 else
12346 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12347 }
12348
12349 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12350 {
12351 int i;
12352 __be32 *buf;
12353 u32 offset = 0, len = 0;
12354 u32 magic, val;
12355
12356 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12357 return NULL;
12358
12359 if (magic == TG3_EEPROM_MAGIC) {
12360 for (offset = TG3_NVM_DIR_START;
12361 offset < TG3_NVM_DIR_END;
12362 offset += TG3_NVM_DIRENT_SIZE) {
12363 if (tg3_nvram_read(tp, offset, &val))
12364 return NULL;
12365
12366 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12367 TG3_NVM_DIRTYPE_EXTVPD)
12368 break;
12369 }
12370
12371 if (offset != TG3_NVM_DIR_END) {
12372 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12373 if (tg3_nvram_read(tp, offset + 4, &offset))
12374 return NULL;
12375
12376 offset = tg3_nvram_logical_addr(tp, offset);
12377 }
12378 }
12379
12380 if (!offset || !len) {
12381 offset = TG3_NVM_VPD_OFF;
12382 len = TG3_NVM_VPD_LEN;
12383 }
12384
12385 buf = kmalloc(len, GFP_KERNEL);
12386 if (buf == NULL)
12387 return NULL;
12388
12389 if (magic == TG3_EEPROM_MAGIC) {
12390 for (i = 0; i < len; i += 4) {
12391 /* The data is in little-endian format in NVRAM.
12392 * Use the big-endian read routines to preserve
12393 * the byte order as it exists in NVRAM.
12394 */
12395 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12396 goto error;
12397 }
12398 } else {
12399 u8 *ptr;
12400 ssize_t cnt;
12401 unsigned int pos = 0;
12402
12403 ptr = (u8 *)&buf[0];
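/* Fall back to the PCI VPD capability, allowing up to three reads;
 * -ETIMEDOUT and -EINTR are treated as zero-byte partial reads so a
 * transient stall or signal retries instead of aborting.
 */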
12404 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12405 cnt = pci_read_vpd(tp->pdev, pos,
12406 len - pos, ptr);
12407 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12408 cnt = 0;
12409 else if (cnt < 0)
12410 goto error;
12411 }
12412 if (pos != len)
12413 goto error;
12414 }
12415
12416 *vpdlen = len;
12417
12418 return buf;
12419
12420 error:
12421 kfree(buf);
12422 return NULL;
12423 }
12424
12425 #define NVRAM_TEST_SIZE 0x100
12426 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12427 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12428 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12429 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12430 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12431 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12432 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12433 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12434
12435 static int tg3_test_nvram(struct tg3 *tp)
12436 {
12437 u32 csum, magic, len;
12438 __be32 *buf;
12439 int i, j, k, err = 0, size;
12440
12441 if (tg3_flag(tp, NO_NVRAM))
12442 return 0;
12443
12444 if (tg3_nvram_read(tp, 0, &magic) != 0)
12445 return -EIO;
12446
12447 if (magic == TG3_EEPROM_MAGIC)
12448 size = NVRAM_TEST_SIZE;
12449 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12450 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12451 TG3_EEPROM_SB_FORMAT_1) {
12452 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12453 case TG3_EEPROM_SB_REVISION_0:
12454 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12455 break;
12456 case TG3_EEPROM_SB_REVISION_2:
12457 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12458 break;
12459 case TG3_EEPROM_SB_REVISION_3:
12460 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12461 break;
12462 case TG3_EEPROM_SB_REVISION_4:
12463 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12464 break;
12465 case TG3_EEPROM_SB_REVISION_5:
12466 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12467 break;
12468 case TG3_EEPROM_SB_REVISION_6:
12469 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12470 break;
12471 default:
12472 return -EIO;
12473 }
12474 } else
12475 return 0;
12476 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12477 size = NVRAM_SELFBOOT_HW_SIZE;
12478 else
12479 return -EIO;
12480
12481 buf = kmalloc(size, GFP_KERNEL);
12482 if (buf == NULL)
12483 return -ENOMEM;
12484
12485 err = -EIO;
12486 for (i = 0, j = 0; i < size; i += 4, j++) {
12487 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12488 if (err)
12489 break;
12490 }
12491 if (i < size)
12492 goto out;
12493
12494 /* Selfboot format */
12495 magic = be32_to_cpu(buf[0]);
12496 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12497 TG3_EEPROM_MAGIC_FW) {
12498 u8 *buf8 = (u8 *) buf, csum8 = 0;
12499
12500 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12501 TG3_EEPROM_SB_REVISION_2) {
12502 /* For rev 2, the csum doesn't include the MBA. */
12503 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12504 csum8 += buf8[i];
12505 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12506 csum8 += buf8[i];
12507 } else {
12508 for (i = 0; i < size; i++)
12509 csum8 += buf8[i];
12510 }
12511
12512 if (csum8 == 0) {
12513 err = 0;
12514 goto out;
12515 }
12516
12517 err = -EIO;
12518 goto out;
12519 }
12520
12521 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12522 TG3_EEPROM_MAGIC_HW) {
12523 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12524 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12525 u8 *buf8 = (u8 *) buf;
12526
12527 /* Separate the parity bits and the data bytes. */
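/* Layout: bytes 0, 8, 16 and 17 of the 32-byte selfboot image are
 * parity bytes; buf8[0] and buf8[8] carry 7 parity bits each,
 * buf8[16] carries 6 and buf8[17] carries 8, giving 28 bits for the
 * 28 data bytes.  The check below enforces odd parity: each data
 * byte plus its parity bit must have an odd total bit count.
 */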
12528 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12529 if ((i == 0) || (i == 8)) {
12530 int l;
12531 u8 msk;
12532
12533 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12534 parity[k++] = buf8[i] & msk;
12535 i++;
12536 } else if (i == 16) {
12537 int l;
12538 u8 msk;
12539
12540 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12541 parity[k++] = buf8[i] & msk;
12542 i++;
12543
12544 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12545 parity[k++] = buf8[i] & msk;
12546 i++;
12547 }
12548 data[j++] = buf8[i];
12549 }
12550
12551 err = -EIO;
12552 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12553 u8 hw8 = hweight8(data[i]);
12554
12555 if ((hw8 & 0x1) && parity[i])
12556 goto out;
12557 else if (!(hw8 & 0x1) && !parity[i])
12558 goto out;
12559 }
12560 err = 0;
12561 goto out;
12562 }
12563
12564 err = -EIO;
12565
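/* Legacy (TG3_EEPROM_MAGIC) images carry two CRC32s: one over bytes
 * 0x00-0x0f stored at offset 0x10, and one over the 0x88-byte
 * manufacturing block at 0x74 stored at offset 0xfc.
 */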
12566 /* Bootstrap checksum at offset 0x10 */
12567 csum = calc_crc((unsigned char *) buf, 0x10);
12568 if (csum != le32_to_cpu(buf[0x10/4]))
12569 goto out;
12570
12571 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12572 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12573 if (csum != le32_to_cpu(buf[0xfc/4]))
12574 goto out;
12575
12576 kfree(buf);
12577
12578 buf = tg3_vpd_readblock(tp, &len);
12579 if (!buf)
12580 return -ENOMEM;
12581
12582 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12583 if (i > 0) {
12584 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12585 if (j < 0)
12586 goto out;
12587
12588 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12589 goto out;
12590
12591 i += PCI_VPD_LRDT_TAG_SIZE;
12592 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12593 PCI_VPD_RO_KEYWORD_CHKSUM);
12594 if (j > 0) {
12595 u8 csum8 = 0;
12596
12597 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12598
12599 for (i = 0; i <= j; i++)
12600 csum8 += ((u8 *)buf)[i];
12601
12602 if (csum8)
12603 goto out;
12604 }
12605 }
12606
12607 err = 0;
12608
12609 out:
12610 kfree(buf);
12611 return err;
12612 }
12613
12614 #define TG3_SERDES_TIMEOUT_SEC 2
12615 #define TG3_COPPER_TIMEOUT_SEC 6
12616
12617 static int tg3_test_link(struct tg3 *tp)
12618 {
12619 int i, max;
12620
12621 if (!netif_running(tp->dev))
12622 return -ENODEV;
12623
12624 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12625 max = TG3_SERDES_TIMEOUT_SEC;
12626 else
12627 max = TG3_COPPER_TIMEOUT_SEC;
12628
12629 for (i = 0; i < max; i++) {
12630 if (tp->link_up)
12631 return 0;
12632
12633 if (msleep_interruptible(1000))
12634 break;
12635 }
12636
12637 return -EIO;
12638 }
12639
12640 /* Only test the commonly used registers */
12641 static int tg3_test_registers(struct tg3 *tp)
12642 {
12643 int i, is_5705, is_5750;
12644 u32 offset, read_mask, write_mask, val, save_val, read_val;
12645 static struct {
12646 u16 offset;
12647 u16 flags;
12648 #define TG3_FL_5705 0x1
12649 #define TG3_FL_NOT_5705 0x2
12650 #define TG3_FL_NOT_5788 0x4
12651 #define TG3_FL_NOT_5750 0x8
12652 u32 read_mask;
12653 u32 write_mask;
12654 } reg_tbl[] = {
12655 /* MAC Control Registers */
12656 { MAC_MODE, TG3_FL_NOT_5705,
12657 0x00000000, 0x00ef6f8c },
12658 { MAC_MODE, TG3_FL_5705,
12659 0x00000000, 0x01ef6b8c },
12660 { MAC_STATUS, TG3_FL_NOT_5705,
12661 0x03800107, 0x00000000 },
12662 { MAC_STATUS, TG3_FL_5705,
12663 0x03800100, 0x00000000 },
12664 { MAC_ADDR_0_HIGH, 0x0000,
12665 0x00000000, 0x0000ffff },
12666 { MAC_ADDR_0_LOW, 0x0000,
12667 0x00000000, 0xffffffff },
12668 { MAC_RX_MTU_SIZE, 0x0000,
12669 0x00000000, 0x0000ffff },
12670 { MAC_TX_MODE, 0x0000,
12671 0x00000000, 0x00000070 },
12672 { MAC_TX_LENGTHS, 0x0000,
12673 0x00000000, 0x00003fff },
12674 { MAC_RX_MODE, TG3_FL_NOT_5705,
12675 0x00000000, 0x000007fc },
12676 { MAC_RX_MODE, TG3_FL_5705,
12677 0x00000000, 0x000007dc },
12678 { MAC_HASH_REG_0, 0x0000,
12679 0x00000000, 0xffffffff },
12680 { MAC_HASH_REG_1, 0x0000,
12681 0x00000000, 0xffffffff },
12682 { MAC_HASH_REG_2, 0x0000,
12683 0x00000000, 0xffffffff },
12684 { MAC_HASH_REG_3, 0x0000,
12685 0x00000000, 0xffffffff },
12686
12687 /* Receive Data and Receive BD Initiator Control Registers. */
12688 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12689 0x00000000, 0xffffffff },
12690 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12691 0x00000000, 0xffffffff },
12692 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12693 0x00000000, 0x00000003 },
12694 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12695 0x00000000, 0xffffffff },
12696 { RCVDBDI_STD_BD+0, 0x0000,
12697 0x00000000, 0xffffffff },
12698 { RCVDBDI_STD_BD+4, 0x0000,
12699 0x00000000, 0xffffffff },
12700 { RCVDBDI_STD_BD+8, 0x0000,
12701 0x00000000, 0xffff0002 },
12702 { RCVDBDI_STD_BD+0xc, 0x0000,
12703 0x00000000, 0xffffffff },
12704
12705 /* Receive BD Initiator Control Registers. */
12706 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12707 0x00000000, 0xffffffff },
12708 { RCVBDI_STD_THRESH, TG3_FL_5705,
12709 0x00000000, 0x000003ff },
12710 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12711 0x00000000, 0xffffffff },
12712
12713 /* Host Coalescing Control Registers. */
12714 { HOSTCC_MODE, TG3_FL_NOT_5705,
12715 0x00000000, 0x00000004 },
12716 { HOSTCC_MODE, TG3_FL_5705,
12717 0x00000000, 0x000000f6 },
12718 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12719 0x00000000, 0xffffffff },
12720 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12721 0x00000000, 0x000003ff },
12722 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12723 0x00000000, 0xffffffff },
12724 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12725 0x00000000, 0x000003ff },
12726 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12727 0x00000000, 0xffffffff },
12728 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12729 0x00000000, 0x000000ff },
12730 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12731 0x00000000, 0xffffffff },
12732 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12733 0x00000000, 0x000000ff },
12734 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12735 0x00000000, 0xffffffff },
12736 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12737 0x00000000, 0xffffffff },
12738 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12739 0x00000000, 0xffffffff },
12740 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12741 0x00000000, 0x000000ff },
12742 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12743 0x00000000, 0xffffffff },
12744 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12745 0x00000000, 0x000000ff },
12746 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12747 0x00000000, 0xffffffff },
12748 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12749 0x00000000, 0xffffffff },
12750 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12751 0x00000000, 0xffffffff },
12752 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12753 0x00000000, 0xffffffff },
12754 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12755 0x00000000, 0xffffffff },
12756 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12757 0xffffffff, 0x00000000 },
12758 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12759 0xffffffff, 0x00000000 },
12760
12761 /* Buffer Manager Control Registers. */
12762 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12763 0x00000000, 0x007fff80 },
12764 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12765 0x00000000, 0x007fffff },
12766 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12767 0x00000000, 0x0000003f },
12768 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12769 0x00000000, 0x000001ff },
12770 { BUFMGR_MB_HIGH_WATER, 0x0000,
12771 0x00000000, 0x000001ff },
12772 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12773 0xffffffff, 0x00000000 },
12774 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12775 0xffffffff, 0x00000000 },
12776
12777 /* Mailbox Registers */
12778 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12779 0x00000000, 0x000001ff },
12780 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12781 0x00000000, 0x000001ff },
12782 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12783 0x00000000, 0x000007ff },
12784 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12785 0x00000000, 0x000001ff },
12786
12787 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12788 };
12789
12790 is_5705 = is_5750 = 0;
12791 if (tg3_flag(tp, 5705_PLUS)) {
12792 is_5705 = 1;
12793 if (tg3_flag(tp, 5750_PLUS))
12794 is_5750 = 1;
12795 }
12796
12797 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12798 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12799 continue;
12800
12801 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12802 continue;
12803
12804 if (tg3_flag(tp, IS_5788) &&
12805 (reg_tbl[i].flags & TG3_FL_NOT_5788))
12806 continue;
12807
12808 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12809 continue;
12810
12811 offset = (u32) reg_tbl[i].offset;
12812 read_mask = reg_tbl[i].read_mask;
12813 write_mask = reg_tbl[i].write_mask;
12814
12815 /* Save the original register content */
12816 save_val = tr32(offset);
12817
12818 /* Determine the read-only value. */
12819 read_val = save_val & read_mask;
12820
12821 /* Write zero to the register, then make sure the read-only bits
12822 * are not changed and the read/write bits are all zeros.
12823 */
12824 tw32(offset, 0);
12825
12826 val = tr32(offset);
12827
12828 /* Test the read-only and read/write bits. */
12829 if (((val & read_mask) != read_val) || (val & write_mask))
12830 goto out;
12831
12832 /* Write ones to all the bits defined by RdMask and WrMask, then
12833 * make sure the read-only bits are not changed and the
12834 * read/write bits are all ones.
12835 */
12836 tw32(offset, read_mask | write_mask);
12837
12838 val = tr32(offset);
12839
12840 /* Test the read-only bits. */
12841 if ((val & read_mask) != read_val)
12842 goto out;
12843
12844 /* Test the read/write bits. */
12845 if ((val & write_mask) != write_mask)
12846 goto out;
12847
12848 tw32(offset, save_val);
12849 }
12850
12851 return 0;
12852
12853 out:
12854 if (netif_msg_hw(tp))
12855 netdev_err(tp->dev,
12856 "Register test failed at offset %x\n", offset);
12857 tw32(offset, save_val);
12858 return -EIO;
12859 }
12860
12861 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12862 {
12863 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
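/* All-zeros, all-ones and an alternating word pattern, a classic
 * sequence for catching stuck-at-0, stuck-at-1 and bit-coupling
 * faults respectively.
 */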
12864 int i;
12865 u32 j;
12866
12867 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12868 for (j = 0; j < len; j += 4) {
12869 u32 val;
12870
12871 tg3_write_mem(tp, offset + j, test_pattern[i]);
12872 tg3_read_mem(tp, offset + j, &val);
12873 if (val != test_pattern[i])
12874 return -EIO;
12875 }
12876 }
12877 return 0;
12878 }
12879
12880 static int tg3_test_memory(struct tg3 *tp)
12881 {
12882 static struct mem_entry {
12883 u32 offset;
12884 u32 len;
12885 } mem_tbl_570x[] = {
12886 { 0x00000000, 0x00b50},
12887 { 0x00002000, 0x1c000},
12888 { 0xffffffff, 0x00000}
12889 }, mem_tbl_5705[] = {
12890 { 0x00000100, 0x0000c},
12891 { 0x00000200, 0x00008},
12892 { 0x00004000, 0x00800},
12893 { 0x00006000, 0x01000},
12894 { 0x00008000, 0x02000},
12895 { 0x00010000, 0x0e000},
12896 { 0xffffffff, 0x00000}
12897 }, mem_tbl_5755[] = {
12898 { 0x00000200, 0x00008},
12899 { 0x00004000, 0x00800},
12900 { 0x00006000, 0x00800},
12901 { 0x00008000, 0x02000},
12902 { 0x00010000, 0x0c000},
12903 { 0xffffffff, 0x00000}
12904 }, mem_tbl_5906[] = {
12905 { 0x00000200, 0x00008},
12906 { 0x00004000, 0x00400},
12907 { 0x00006000, 0x00400},
12908 { 0x00008000, 0x01000},
12909 { 0x00010000, 0x01000},
12910 { 0xffffffff, 0x00000}
12911 }, mem_tbl_5717[] = {
12912 { 0x00000200, 0x00008},
12913 { 0x00010000, 0x0a000},
12914 { 0x00020000, 0x13c00},
12915 { 0xffffffff, 0x00000}
12916 }, mem_tbl_57765[] = {
12917 { 0x00000200, 0x00008},
12918 { 0x00004000, 0x00800},
12919 { 0x00006000, 0x09800},
12920 { 0x00010000, 0x0a000},
12921 { 0xffffffff, 0x00000}
12922 };
12923 struct mem_entry *mem_tbl;
12924 int err = 0;
12925 int i;
12926
12927 if (tg3_flag(tp, 5717_PLUS))
12928 mem_tbl = mem_tbl_5717;
12929 else if (tg3_flag(tp, 57765_CLASS) ||
12930 tg3_asic_rev(tp) == ASIC_REV_5762)
12931 mem_tbl = mem_tbl_57765;
12932 else if (tg3_flag(tp, 5755_PLUS))
12933 mem_tbl = mem_tbl_5755;
12934 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
12935 mem_tbl = mem_tbl_5906;
12936 else if (tg3_flag(tp, 5705_PLUS))
12937 mem_tbl = mem_tbl_5705;
12938 else
12939 mem_tbl = mem_tbl_570x;
12940
12941 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
12942 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
12943 if (err)
12944 break;
12945 }
12946
12947 return err;
12948 }
12949
12950 #define TG3_TSO_MSS 500
12951
12952 #define TG3_TSO_IP_HDR_LEN 20
12953 #define TG3_TSO_TCP_HDR_LEN 20
12954 #define TG3_TSO_TCP_OPT_LEN 12
12955
12956 static const u8 tg3_tso_header[] = {
12957 0x08, 0x00,
12958 0x45, 0x00, 0x00, 0x00,
12959 0x00, 0x00, 0x40, 0x00,
12960 0x40, 0x06, 0x00, 0x00,
12961 0x0a, 0x00, 0x00, 0x01,
12962 0x0a, 0x00, 0x00, 0x02,
12963 0x0d, 0x00, 0xe0, 0x00,
12964 0x00, 0x00, 0x01, 0x00,
12965 0x00, 0x00, 0x02, 0x00,
12966 0x80, 0x10, 0x10, 0x00,
12967 0x14, 0x09, 0x00, 0x00,
12968 0x01, 0x01, 0x08, 0x0a,
12969 0x11, 0x11, 0x11, 0x11,
12970 0x11, 0x11, 0x11, 0x11,
12971 };
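/* Decoded, this template is a 2-byte Ethertype (0x0800, IPv4), a
 * 20-byte IPv4 header (DF, TTL 64, TCP, 10.0.0.1 -> 10.0.0.2) and a
 * 32-byte TCP header whose final 12 bytes are NOP, NOP and a
 * timestamp option padded with 0x11 filler.
 */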
12972
12973 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
12974 {
12975 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
12976 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
12977 u32 budget;
12978 struct sk_buff *skb;
12979 u8 *tx_data, *rx_data;
12980 dma_addr_t map;
12981 int num_pkts, tx_len, rx_len, i, err;
12982 struct tg3_rx_buffer_desc *desc;
12983 struct tg3_napi *tnapi, *rnapi;
12984 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
12985
12986 tnapi = &tp->napi[0];
12987 rnapi = &tp->napi[0];
12988 if (tp->irq_cnt > 1) {
12989 if (tg3_flag(tp, ENABLE_RSS))
12990 rnapi = &tp->napi[1];
12991 if (tg3_flag(tp, ENABLE_TSS))
12992 tnapi = &tp->napi[1];
12993 }
12994 coal_now = tnapi->coal_now | rnapi->coal_now;
12995
12996 err = -EIO;
12997
12998 tx_len = pktsz;
12999 skb = netdev_alloc_skb(tp->dev, tx_len);
13000 if (!skb)
13001 return -ENOMEM;
13002
13003 tx_data = skb_put(skb, tx_len);
13004 memcpy(tx_data, tp->dev->dev_addr, 6);
13005 memset(tx_data + 6, 0x0, 8);
13006
13007 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13008
13009 if (tso_loopback) {
13010 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13011
13012 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13013 TG3_TSO_TCP_OPT_LEN;
13014
13015 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13016 sizeof(tg3_tso_header));
13017 mss = TG3_TSO_MSS;
13018
13019 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13020 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13021
13022 /* Set the total length field in the IP header */
13023 iph->tot_len = htons((u16)(mss + hdr_len));
13024
13025 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13026 TXD_FLAG_CPU_POST_DMA);
13027
13028 if (tg3_flag(tp, HW_TSO_1) ||
13029 tg3_flag(tp, HW_TSO_2) ||
13030 tg3_flag(tp, HW_TSO_3)) {
13031 struct tcphdr *th;
13032 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13033 th = (struct tcphdr *)&tx_data[val];
13034 th->check = 0;
13035 } else
13036 base_flags |= TXD_FLAG_TCPUDP_CSUM;
13037
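/* Each hardware TSO generation encodes the header length
 * differently: HW_TSO_3 scatters hdr_len bits across mss and
 * base_flags, HW_TSO_2 packs hdr_len into mss bits 9 and up, and
 * older parts encode only the TCP option length.
 */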
13038 if (tg3_flag(tp, HW_TSO_3)) {
13039 mss |= (hdr_len & 0xc) << 12;
13040 if (hdr_len & 0x10)
13041 base_flags |= 0x00000010;
13042 base_flags |= (hdr_len & 0x3e0) << 5;
13043 } else if (tg3_flag(tp, HW_TSO_2))
13044 mss |= hdr_len << 9;
13045 else if (tg3_flag(tp, HW_TSO_1) ||
13046 tg3_asic_rev(tp) == ASIC_REV_5705) {
13047 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13048 } else {
13049 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13050 }
13051
13052 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13053 } else {
13054 num_pkts = 1;
13055 data_off = ETH_HLEN;
13056
13057 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13058 tx_len > VLAN_ETH_FRAME_LEN)
13059 base_flags |= TXD_FLAG_JMB_PKT;
13060 }
13061
13062 for (i = data_off; i < tx_len; i++)
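/* Fill the payload with a rolling 0x00..0xff pattern; the receive
 * loop below re-derives the same sequence (continuing it across TSO
 * segments via 'val') to verify both contents and ordering.
 */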
13063 tx_data[i] = (u8) (i & 0xff);
13064
13065 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13066 if (pci_dma_mapping_error(tp->pdev, map)) {
13067 dev_kfree_skb(skb);
13068 return -EIO;
13069 }
13070
13071 val = tnapi->tx_prod;
13072 tnapi->tx_buffers[val].skb = skb;
13073 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13074
13075 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13076 rnapi->coal_now);
13077
13078 udelay(10);
13079
13080 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13081
13082 budget = tg3_tx_avail(tnapi);
13083 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13084 base_flags | TXD_FLAG_END, mss, 0)) {
13085 tnapi->tx_buffers[val].skb = NULL;
13086 dev_kfree_skb(skb);
13087 return -EIO;
13088 }
13089
13090 tnapi->tx_prod++;
13091
13092 /* Sync BD data before updating mailbox */
13093 wmb();
13094
13095 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13096 tr32_mailbox(tnapi->prodmbox);
13097
13098 udelay(10);
13099
13100 /* Poll for up to 350 usec (35 x udelay(10)) to allow enough time on some 10/100 Mbps devices. */
13101 for (i = 0; i < 35; i++) {
13102 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13103 coal_now);
13104
13105 udelay(10);
13106
13107 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13108 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13109 if ((tx_idx == tnapi->tx_prod) &&
13110 (rx_idx == (rx_start_idx + num_pkts)))
13111 break;
13112 }
13113
13114 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13115 dev_kfree_skb(skb);
13116
13117 if (tx_idx != tnapi->tx_prod)
13118 goto out;
13119
13120 if (rx_idx != rx_start_idx + num_pkts)
13121 goto out;
13122
13123 val = data_off;
13124 while (rx_idx != rx_start_idx) {
13125 desc = &rnapi->rx_rcb[rx_start_idx++];
13126 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13127 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13128
13129 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13130 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13131 goto out;
13132
13133 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13134 - ETH_FCS_LEN;
13135
13136 if (!tso_loopback) {
13137 if (rx_len != tx_len)
13138 goto out;
13139
13140 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13141 if (opaque_key != RXD_OPAQUE_RING_STD)
13142 goto out;
13143 } else {
13144 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13145 goto out;
13146 }
13147 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13148 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13149 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13150 goto out;
13151 }
13152
13153 if (opaque_key == RXD_OPAQUE_RING_STD) {
13154 rx_data = tpr->rx_std_buffers[desc_idx].data;
13155 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13156 mapping);
13157 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13158 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13159 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13160 mapping);
13161 } else
13162 goto out;
13163
13164 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13165 PCI_DMA_FROMDEVICE);
13166
13167 rx_data += TG3_RX_OFFSET(tp);
13168 for (i = data_off; i < rx_len; i++, val++) {
13169 if (*(rx_data + i) != (u8) (val & 0xff))
13170 goto out;
13171 }
13172 }
13173
13174 err = 0;
13175
13176 /* tg3_free_rings will unmap and free the rx_data */
13177 out:
13178 return err;
13179 }
13180
13181 #define TG3_STD_LOOPBACK_FAILED 1
13182 #define TG3_JMB_LOOPBACK_FAILED 2
13183 #define TG3_TSO_LOOPBACK_FAILED 4
13184 #define TG3_LOOPBACK_FAILED \
13185 (TG3_STD_LOOPBACK_FAILED | \
13186 TG3_JMB_LOOPBACK_FAILED | \
13187 TG3_TSO_LOOPBACK_FAILED)
13188
13189 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13190 {
13191 int err = -EIO;
13192 u32 eee_cap;
13193 u32 jmb_pkt_sz = 9000;
13194
13195 if (tp->dma_limit)
13196 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13197
13198 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13199 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13200
13201 if (!netif_running(tp->dev)) {
13202 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13203 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13204 if (do_extlpbk)
13205 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13206 goto done;
13207 }
13208
13209 err = tg3_reset_hw(tp, true);
13210 if (err) {
13211 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13212 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13213 if (do_extlpbk)
13214 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13215 goto done;
13216 }
13217
13218 if (tg3_flag(tp, ENABLE_RSS)) {
13219 int i;
13220
13221 /* Reroute all rx packets to the 1st queue */
13222 for (i = MAC_RSS_INDIR_TBL_0;
13223 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13224 tw32(i, 0x0);
13225 }
13226
13227 /* HW errata - MAC loopback fails in some cases on the 5780.
13228 * Normal traffic and PHY loopback are not affected by
13229 * errata. Also, the MAC loopback test is deprecated for
13230 * all newer ASIC revisions.
13231 */
13232 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13233 !tg3_flag(tp, CPMU_PRESENT)) {
13234 tg3_mac_loopback(tp, true);
13235
13236 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13237 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13238
13239 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13240 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13241 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13242
13243 tg3_mac_loopback(tp, false);
13244 }
13245
13246 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13247 !tg3_flag(tp, USE_PHYLIB)) {
13248 int i;
13249
13250 tg3_phy_lpbk_set(tp, 0, false);
13251
13252 /* Wait for link */
13253 for (i = 0; i < 100; i++) {
13254 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13255 break;
13256 mdelay(1);
13257 }
13258
13259 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13260 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13261 if (tg3_flag(tp, TSO_CAPABLE) &&
13262 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13263 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13264 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13265 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13266 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13267
13268 if (do_extlpbk) {
13269 tg3_phy_lpbk_set(tp, 0, true);
13270
13271 /* All link indications report up, but the hardware
13272 * isn't really ready for about 20 msec. Double it
13273 * to be sure.
13274 */
13275 mdelay(40);
13276
13277 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13278 data[TG3_EXT_LOOPB_TEST] |=
13279 TG3_STD_LOOPBACK_FAILED;
13280 if (tg3_flag(tp, TSO_CAPABLE) &&
13281 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13282 data[TG3_EXT_LOOPB_TEST] |=
13283 TG3_TSO_LOOPBACK_FAILED;
13284 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13285 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13286 data[TG3_EXT_LOOPB_TEST] |=
13287 TG3_JMB_LOOPBACK_FAILED;
13288 }
13289
13290 /* Re-enable gphy autopowerdown. */
13291 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13292 tg3_phy_toggle_apd(tp, true);
13293 }
13294
13295 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13296 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13297
13298 done:
13299 tp->phy_flags |= eee_cap;
13300
13301 return err;
13302 }
13303
13304 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13305 u64 *data)
13306 {
13307 struct tg3 *tp = netdev_priv(dev);
13308 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13309
13310 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
13311 tg3_power_up(tp)) {
13312 etest->flags |= ETH_TEST_FL_FAILED;
13313 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13314 return;
13315 }
13316
13317 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13318
13319 if (tg3_test_nvram(tp) != 0) {
13320 etest->flags |= ETH_TEST_FL_FAILED;
13321 data[TG3_NVRAM_TEST] = 1;
13322 }
13323 if (!doextlpbk && tg3_test_link(tp)) {
13324 etest->flags |= ETH_TEST_FL_FAILED;
13325 data[TG3_LINK_TEST] = 1;
13326 }
13327 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13328 int err, err2 = 0, irq_sync = 0;
13329
13330 if (netif_running(dev)) {
13331 tg3_phy_stop(tp);
13332 tg3_netif_stop(tp);
13333 irq_sync = 1;
13334 }
13335
13336 tg3_full_lock(tp, irq_sync);
13337 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13338 err = tg3_nvram_lock(tp);
13339 tg3_halt_cpu(tp, RX_CPU_BASE);
13340 if (!tg3_flag(tp, 5705_PLUS))
13341 tg3_halt_cpu(tp, TX_CPU_BASE);
13342 if (!err)
13343 tg3_nvram_unlock(tp);
13344
13345 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13346 tg3_phy_reset(tp);
13347
13348 if (tg3_test_registers(tp) != 0) {
13349 etest->flags |= ETH_TEST_FL_FAILED;
13350 data[TG3_REGISTER_TEST] = 1;
13351 }
13352
13353 if (tg3_test_memory(tp) != 0) {
13354 etest->flags |= ETH_TEST_FL_FAILED;
13355 data[TG3_MEMORY_TEST] = 1;
13356 }
13357
13358 if (doextlpbk)
13359 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13360
13361 if (tg3_test_loopback(tp, data, doextlpbk))
13362 etest->flags |= ETH_TEST_FL_FAILED;
13363
13364 tg3_full_unlock(tp);
13365
13366 if (tg3_test_interrupt(tp) != 0) {
13367 etest->flags |= ETH_TEST_FL_FAILED;
13368 data[TG3_INTERRUPT_TEST] = 1;
13369 }
13370
13371 tg3_full_lock(tp, 0);
13372
13373 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13374 if (netif_running(dev)) {
13375 tg3_flag_set(tp, INIT_COMPLETE);
13376 err2 = tg3_restart_hw(tp, true);
13377 if (!err2)
13378 tg3_netif_start(tp);
13379 }
13380
13381 tg3_full_unlock(tp);
13382
13383 if (irq_sync && !err2)
13384 tg3_phy_start(tp);
13385 }
13386 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13387 tg3_power_down(tp);
13388
13389 }
13390
13391 static int tg3_hwtstamp_ioctl(struct net_device *dev,
13392 struct ifreq *ifr, int cmd)
13393 {
13394 struct tg3 *tp = netdev_priv(dev);
13395 struct hwtstamp_config stmpconf;
13396
13397 if (!tg3_flag(tp, PTP_CAPABLE))
13398 return -EINVAL;
13399
13400 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13401 return -EFAULT;
13402
13403 if (stmpconf.flags)
13404 return -EINVAL;
13405
13406 switch (stmpconf.tx_type) {
13407 case HWTSTAMP_TX_ON:
13408 tg3_flag_set(tp, TX_TSTAMP_EN);
13409 break;
13410 case HWTSTAMP_TX_OFF:
13411 tg3_flag_clear(tp, TX_TSTAMP_EN);
13412 break;
13413 default:
13414 return -ERANGE;
13415 }
13416
13417 switch (stmpconf.rx_filter) {
13418 case HWTSTAMP_FILTER_NONE:
13419 tp->rxptpctl = 0;
13420 break;
13421 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13422 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13423 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13424 break;
13425 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13426 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13427 TG3_RX_PTP_CTL_SYNC_EVNT;
13428 break;
13429 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13430 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13431 TG3_RX_PTP_CTL_DELAY_REQ;
13432 break;
13433 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13434 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13435 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13436 break;
13437 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13438 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13439 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13440 break;
13441 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13442 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13443 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13444 break;
13445 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13446 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13447 TG3_RX_PTP_CTL_SYNC_EVNT;
13448 break;
13449 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13450 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13451 TG3_RX_PTP_CTL_SYNC_EVNT;
13452 break;
13453 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13454 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13455 TG3_RX_PTP_CTL_SYNC_EVNT;
13456 break;
13457 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13458 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13459 TG3_RX_PTP_CTL_DELAY_REQ;
13460 break;
13461 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13462 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13463 TG3_RX_PTP_CTL_DELAY_REQ;
13464 break;
13465 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13466 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13467 TG3_RX_PTP_CTL_DELAY_REQ;
13468 break;
13469 default:
13470 return -ERANGE;
13471 }
13472
13473 if (netif_running(dev) && tp->rxptpctl)
13474 tw32(TG3_RX_PTP_CTL,
13475 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13476
13477 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13478 -EFAULT : 0;
13479 }
13480
13481 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13482 {
13483 struct mii_ioctl_data *data = if_mii(ifr);
13484 struct tg3 *tp = netdev_priv(dev);
13485 int err;
13486
13487 if (tg3_flag(tp, USE_PHYLIB)) {
13488 struct phy_device *phydev;
13489 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13490 return -EAGAIN;
13491 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
13492 return phy_mii_ioctl(phydev, ifr, cmd);
13493 }
13494
13495 switch (cmd) {
13496 case SIOCGMIIPHY:
13497 data->phy_id = tp->phy_addr;
13498
13499 /* fallthru */
13500 case SIOCGMIIREG: {
13501 u32 mii_regval;
13502
13503 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13504 break; /* We have no PHY */
13505
13506 if (!netif_running(dev))
13507 return -EAGAIN;
13508
13509 spin_lock_bh(&tp->lock);
13510 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13511 data->reg_num & 0x1f, &mii_regval);
13512 spin_unlock_bh(&tp->lock);
13513
13514 data->val_out = mii_regval;
13515
13516 return err;
13517 }
13518
13519 case SIOCSMIIREG:
13520 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13521 break; /* We have no PHY */
13522
13523 if (!netif_running(dev))
13524 return -EAGAIN;
13525
13526 spin_lock_bh(&tp->lock);
13527 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13528 data->reg_num & 0x1f, data->val_in);
13529 spin_unlock_bh(&tp->lock);
13530
13531 return err;
13532
13533 case SIOCSHWTSTAMP:
13534 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
13535
13536 default:
13537 /* do nothing */
13538 break;
13539 }
13540 return -EOPNOTSUPP;
13541 }
13542
13543 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13544 {
13545 struct tg3 *tp = netdev_priv(dev);
13546
13547 memcpy(ec, &tp->coal, sizeof(*ec));
13548 return 0;
13549 }
13550
13551 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13552 {
13553 struct tg3 *tp = netdev_priv(dev);
13554 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13555 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13556
13557 if (!tg3_flag(tp, 5705_PLUS)) {
13558 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13559 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13560 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13561 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13562 }
13563
13564 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13565 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13566 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13567 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13568 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13569 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13570 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13571 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13572 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13573 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13574 return -EINVAL;
13575
13576 /* No rx interrupts will be generated if both are zero */
13577 if ((ec->rx_coalesce_usecs == 0) &&
13578 (ec->rx_max_coalesced_frames == 0))
13579 return -EINVAL;
13580
13581 /* No tx interrupts will be generated if both are zero */
13582 if ((ec->tx_coalesce_usecs == 0) &&
13583 (ec->tx_max_coalesced_frames == 0))
13584 return -EINVAL;
13585
13586 /* Only copy relevant parameters, ignore all others. */
13587 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13588 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13589 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13590 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13591 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13592 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13593 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13594 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13595 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13596
13597 if (netif_running(dev)) {
13598 tg3_full_lock(tp, 0);
13599 __tg3_set_coalesce(tp, &tp->coal);
13600 tg3_full_unlock(tp);
13601 }
13602 return 0;
13603 }
13604
13605 static const struct ethtool_ops tg3_ethtool_ops = {
13606 .get_settings = tg3_get_settings,
13607 .set_settings = tg3_set_settings,
13608 .get_drvinfo = tg3_get_drvinfo,
13609 .get_regs_len = tg3_get_regs_len,
13610 .get_regs = tg3_get_regs,
13611 .get_wol = tg3_get_wol,
13612 .set_wol = tg3_set_wol,
13613 .get_msglevel = tg3_get_msglevel,
13614 .set_msglevel = tg3_set_msglevel,
13615 .nway_reset = tg3_nway_reset,
13616 .get_link = ethtool_op_get_link,
13617 .get_eeprom_len = tg3_get_eeprom_len,
13618 .get_eeprom = tg3_get_eeprom,
13619 .set_eeprom = tg3_set_eeprom,
13620 .get_ringparam = tg3_get_ringparam,
13621 .set_ringparam = tg3_set_ringparam,
13622 .get_pauseparam = tg3_get_pauseparam,
13623 .set_pauseparam = tg3_set_pauseparam,
13624 .self_test = tg3_self_test,
13625 .get_strings = tg3_get_strings,
13626 .set_phys_id = tg3_set_phys_id,
13627 .get_ethtool_stats = tg3_get_ethtool_stats,
13628 .get_coalesce = tg3_get_coalesce,
13629 .set_coalesce = tg3_set_coalesce,
13630 .get_sset_count = tg3_get_sset_count,
13631 .get_rxnfc = tg3_get_rxnfc,
13632 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
13633 .get_rxfh_indir = tg3_get_rxfh_indir,
13634 .set_rxfh_indir = tg3_set_rxfh_indir,
13635 .get_channels = tg3_get_channels,
13636 .set_channels = tg3_set_channels,
13637 .get_ts_info = tg3_get_ts_info,
13638 };
13639
13640 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13641 struct rtnl_link_stats64 *stats)
13642 {
13643 struct tg3 *tp = netdev_priv(dev);
13644
13645 spin_lock_bh(&tp->lock);
13646 if (!tp->hw_stats) {
13647 spin_unlock_bh(&tp->lock);
13648 return &tp->net_stats_prev;
13649 }
13650
13651 tg3_get_nstats(tp, stats);
13652 spin_unlock_bh(&tp->lock);
13653
13654 return stats;
13655 }
13656
13657 static void tg3_set_rx_mode(struct net_device *dev)
13658 {
13659 struct tg3 *tp = netdev_priv(dev);
13660
13661 if (!netif_running(dev))
13662 return;
13663
13664 tg3_full_lock(tp, 0);
13665 __tg3_set_rx_mode(dev);
13666 tg3_full_unlock(tp);
13667 }
13668
13669 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13670 int new_mtu)
13671 {
13672 dev->mtu = new_mtu;
13673
13674 if (new_mtu > ETH_DATA_LEN) {
13675 if (tg3_flag(tp, 5780_CLASS)) {
13676 netdev_update_features(dev);
13677 tg3_flag_clear(tp, TSO_CAPABLE);
13678 } else {
13679 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13680 }
13681 } else {
13682 if (tg3_flag(tp, 5780_CLASS)) {
13683 tg3_flag_set(tp, TSO_CAPABLE);
13684 netdev_update_features(dev);
13685 }
13686 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13687 }
13688 }
13689
13690 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13691 {
13692 struct tg3 *tp = netdev_priv(dev);
13693 int err;
13694 bool reset_phy = false;
13695
13696 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13697 return -EINVAL;
13698
13699 if (!netif_running(dev)) {
13700 /* We'll just catch it later when the
13701 * device is brought up.
13702 */
13703 tg3_set_mtu(dev, tp, new_mtu);
13704 return 0;
13705 }
13706
13707 tg3_phy_stop(tp);
13708
13709 tg3_netif_stop(tp);
13710
13711 tg3_full_lock(tp, 1);
13712
13713 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13714
13715 tg3_set_mtu(dev, tp, new_mtu);
13716
13717 /* Reset the PHY, otherwise the read DMA engine will be left in a
13718 * mode that breaks all DMA read requests up into 256-byte chunks.
13719 */
13720 if (tg3_asic_rev(tp) == ASIC_REV_57766)
13721 reset_phy = true;
13722
13723 err = tg3_restart_hw(tp, reset_phy);
13724
13725 if (!err)
13726 tg3_netif_start(tp);
13727
13728 tg3_full_unlock(tp);
13729
13730 if (!err)
13731 tg3_phy_start(tp);
13732
13733 return err;
13734 }
13735
13736 static const struct net_device_ops tg3_netdev_ops = {
13737 .ndo_open = tg3_open,
13738 .ndo_stop = tg3_close,
13739 .ndo_start_xmit = tg3_start_xmit,
13740 .ndo_get_stats64 = tg3_get_stats64,
13741 .ndo_validate_addr = eth_validate_addr,
13742 .ndo_set_rx_mode = tg3_set_rx_mode,
13743 .ndo_set_mac_address = tg3_set_mac_addr,
13744 .ndo_do_ioctl = tg3_ioctl,
13745 .ndo_tx_timeout = tg3_tx_timeout,
13746 .ndo_change_mtu = tg3_change_mtu,
13747 .ndo_fix_features = tg3_fix_features,
13748 .ndo_set_features = tg3_set_features,
13749 #ifdef CONFIG_NET_POLL_CONTROLLER
13750 .ndo_poll_controller = tg3_poll_controller,
13751 #endif
13752 };
13753
13754 static void tg3_get_eeprom_size(struct tg3 *tp)
13755 {
13756 u32 cursize, val, magic;
13757
13758 tp->nvram_size = EEPROM_CHIP_SIZE;
13759
13760 if (tg3_nvram_read(tp, 0, &magic) != 0)
13761 return;
13762
13763 if ((magic != TG3_EEPROM_MAGIC) &&
13764 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13765 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13766 return;
13767
13768 /*
13769 * Size the chip by reading offsets at increasing powers of two.
13770 * When we encounter our validation signature, we know the addressing
13771 * has wrapped around, and thus have our chip size.
13772 */
13773 cursize = 0x10;
13774
13775 while (cursize < tp->nvram_size) {
13776 if (tg3_nvram_read(tp, cursize, &val) != 0)
13777 return;
13778
13779 if (val == magic)
13780 break;
13781
13782 cursize <<= 1;
13783 }
13784
13785 tp->nvram_size = cursize;
13786 }
13787
13788 static void tg3_get_nvram_size(struct tg3 *tp)
13789 {
13790 u32 val;
13791
13792 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13793 return;
13794
13795 /* Selfboot format */
13796 if (val != TG3_EEPROM_MAGIC) {
13797 tg3_get_eeprom_size(tp);
13798 return;
13799 }
13800
13801 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13802 if (val != 0) {
13803 /* This is confusing. We want to operate on the
13804 * 16-bit value at offset 0xf2. The tg3_nvram_read()
13805 * call will read from NVRAM and byteswap the data
13806 * according to the byteswapping settings for all
13807 * other register accesses. This ensures the data we
13808 * want will always reside in the lower 16-bits.
13809 * However, the data in NVRAM is in LE format, which
13810 * means the data from the NVRAM read will always be
13811 * opposite the endianness of the CPU. The 16-bit
13812 * byteswap then brings the data to CPU endianness.
13813 */
13814 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13815 return;
13816 }
13817 }
13818 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13819 }
13820
13821 static void tg3_get_nvram_info(struct tg3 *tp)
13822 {
13823 u32 nvcfg1;
13824
13825 nvcfg1 = tr32(NVRAM_CFG1);
13826 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
13827 tg3_flag_set(tp, FLASH);
13828 } else {
13829 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13830 tw32(NVRAM_CFG1, nvcfg1);
13831 }
13832
13833 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
13834 tg3_flag(tp, 5780_CLASS)) {
13835 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
13836 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
13837 tp->nvram_jedecnum = JEDEC_ATMEL;
13838 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13839 tg3_flag_set(tp, NVRAM_BUFFERED);
13840 break;
13841 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
13842 tp->nvram_jedecnum = JEDEC_ATMEL;
13843 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
13844 break;
13845 case FLASH_VENDOR_ATMEL_EEPROM:
13846 tp->nvram_jedecnum = JEDEC_ATMEL;
13847 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13848 tg3_flag_set(tp, NVRAM_BUFFERED);
13849 break;
13850 case FLASH_VENDOR_ST:
13851 tp->nvram_jedecnum = JEDEC_ST;
13852 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
13853 tg3_flag_set(tp, NVRAM_BUFFERED);
13854 break;
13855 case FLASH_VENDOR_SAIFUN:
13856 tp->nvram_jedecnum = JEDEC_SAIFUN;
13857 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
13858 break;
13859 case FLASH_VENDOR_SST_SMALL:
13860 case FLASH_VENDOR_SST_LARGE:
13861 tp->nvram_jedecnum = JEDEC_SST;
13862 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
13863 break;
13864 }
13865 } else {
13866 tp->nvram_jedecnum = JEDEC_ATMEL;
13867 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13868 tg3_flag_set(tp, NVRAM_BUFFERED);
13869 }
13870 }
13871
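/* The 264- and 528-byte page sizes below correspond to Atmel AT45DB
 * DataFlash parts, which are assumed to be the only devices needing
 * NVRAM address translation (see the NO_NVRAM_ADDR_TRANS checks in
 * the *_nvram_info() helpers).
 */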
13872 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13873 {
13874 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13875 case FLASH_5752PAGE_SIZE_256:
13876 tp->nvram_pagesize = 256;
13877 break;
13878 case FLASH_5752PAGE_SIZE_512:
13879 tp->nvram_pagesize = 512;
13880 break;
13881 case FLASH_5752PAGE_SIZE_1K:
13882 tp->nvram_pagesize = 1024;
13883 break;
13884 case FLASH_5752PAGE_SIZE_2K:
13885 tp->nvram_pagesize = 2048;
13886 break;
13887 case FLASH_5752PAGE_SIZE_4K:
13888 tp->nvram_pagesize = 4096;
13889 break;
13890 case FLASH_5752PAGE_SIZE_264:
13891 tp->nvram_pagesize = 264;
13892 break;
13893 case FLASH_5752PAGE_SIZE_528:
13894 tp->nvram_pagesize = 528;
13895 break;
13896 }
13897 }
13898
13899 static void tg3_get_5752_nvram_info(struct tg3 *tp)
13900 {
13901 u32 nvcfg1;
13902
13903 nvcfg1 = tr32(NVRAM_CFG1);
13904
13905 /* NVRAM protection for TPM */
13906 if (nvcfg1 & (1 << 27))
13907 tg3_flag_set(tp, PROTECTED_NVRAM);
13908
13909 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13910 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
13911 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
13912 tp->nvram_jedecnum = JEDEC_ATMEL;
13913 tg3_flag_set(tp, NVRAM_BUFFERED);
13914 break;
13915 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13916 tp->nvram_jedecnum = JEDEC_ATMEL;
13917 tg3_flag_set(tp, NVRAM_BUFFERED);
13918 tg3_flag_set(tp, FLASH);
13919 break;
13920 case FLASH_5752VENDOR_ST_M45PE10:
13921 case FLASH_5752VENDOR_ST_M45PE20:
13922 case FLASH_5752VENDOR_ST_M45PE40:
13923 tp->nvram_jedecnum = JEDEC_ST;
13924 tg3_flag_set(tp, NVRAM_BUFFERED);
13925 tg3_flag_set(tp, FLASH);
13926 break;
13927 }
13928
13929 if (tg3_flag(tp, FLASH)) {
13930 tg3_nvram_get_pagesize(tp, nvcfg1);
13931 } else {
13932 /* For EEPROM, set the pagesize to the maximum EEPROM size. */
13933 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13934
13935 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13936 tw32(NVRAM_CFG1, nvcfg1);
13937 }
13938 }
13939
13940 static void tg3_get_5755_nvram_info(struct tg3 *tp)
13941 {
13942 u32 nvcfg1, protect = 0;
13943
13944 nvcfg1 = tr32(NVRAM_CFG1);
13945
13946 /* NVRAM protection for TPM */
13947 if (nvcfg1 & (1 << 27)) {
13948 tg3_flag_set(tp, PROTECTED_NVRAM);
13949 protect = 1;
13950 }
13951
13952 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13953 switch (nvcfg1) {
13954 case FLASH_5755VENDOR_ATMEL_FLASH_1:
13955 case FLASH_5755VENDOR_ATMEL_FLASH_2:
13956 case FLASH_5755VENDOR_ATMEL_FLASH_3:
13957 case FLASH_5755VENDOR_ATMEL_FLASH_5:
13958 tp->nvram_jedecnum = JEDEC_ATMEL;
13959 tg3_flag_set(tp, NVRAM_BUFFERED);
13960 tg3_flag_set(tp, FLASH);
13961 tp->nvram_pagesize = 264;
13962 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
13963 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
13964 tp->nvram_size = (protect ? 0x3e200 :
13965 TG3_NVRAM_SIZE_512KB);
13966 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
13967 tp->nvram_size = (protect ? 0x1f200 :
13968 TG3_NVRAM_SIZE_256KB);
13969 else
13970 tp->nvram_size = (protect ? 0x1f200 :
13971 TG3_NVRAM_SIZE_128KB);
13972 break;
13973 case FLASH_5752VENDOR_ST_M45PE10:
13974 case FLASH_5752VENDOR_ST_M45PE20:
13975 case FLASH_5752VENDOR_ST_M45PE40:
13976 tp->nvram_jedecnum = JEDEC_ST;
13977 tg3_flag_set(tp, NVRAM_BUFFERED);
13978 tg3_flag_set(tp, FLASH);
13979 tp->nvram_pagesize = 256;
13980 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
13981 tp->nvram_size = (protect ?
13982 TG3_NVRAM_SIZE_64KB :
13983 TG3_NVRAM_SIZE_128KB);
13984 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
13985 tp->nvram_size = (protect ?
13986 TG3_NVRAM_SIZE_64KB :
13987 TG3_NVRAM_SIZE_256KB);
13988 else
13989 tp->nvram_size = (protect ?
13990 TG3_NVRAM_SIZE_128KB :
13991 TG3_NVRAM_SIZE_512KB);
13992 break;
13993 }
13994 }
13995
13996 static void tg3_get_5787_nvram_info(struct tg3 *tp)
13997 {
13998 u32 nvcfg1;
13999
14000 nvcfg1 = tr32(NVRAM_CFG1);
14001
14002 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14003 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14004 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14005 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14006 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14007 tp->nvram_jedecnum = JEDEC_ATMEL;
14008 tg3_flag_set(tp, NVRAM_BUFFERED);
14009 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14010
14011 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14012 tw32(NVRAM_CFG1, nvcfg1);
14013 break;
14014 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14015 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14016 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14017 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14018 tp->nvram_jedecnum = JEDEC_ATMEL;
14019 tg3_flag_set(tp, NVRAM_BUFFERED);
14020 tg3_flag_set(tp, FLASH);
14021 tp->nvram_pagesize = 264;
14022 break;
14023 case FLASH_5752VENDOR_ST_M45PE10:
14024 case FLASH_5752VENDOR_ST_M45PE20:
14025 case FLASH_5752VENDOR_ST_M45PE40:
14026 tp->nvram_jedecnum = JEDEC_ST;
14027 tg3_flag_set(tp, NVRAM_BUFFERED);
14028 tg3_flag_set(tp, FLASH);
14029 tp->nvram_pagesize = 256;
14030 break;
14031 }
14032 }
14033
14034 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14035 {
14036 u32 nvcfg1, protect = 0;
14037
14038 nvcfg1 = tr32(NVRAM_CFG1);
14039
14040 /* NVRAM protection for TPM */
14041 if (nvcfg1 & (1 << 27)) {
14042 tg3_flag_set(tp, PROTECTED_NVRAM);
14043 protect = 1;
14044 }
14045
14046 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14047 switch (nvcfg1) {
14048 case FLASH_5761VENDOR_ATMEL_ADB021D:
14049 case FLASH_5761VENDOR_ATMEL_ADB041D:
14050 case FLASH_5761VENDOR_ATMEL_ADB081D:
14051 case FLASH_5761VENDOR_ATMEL_ADB161D:
14052 case FLASH_5761VENDOR_ATMEL_MDB021D:
14053 case FLASH_5761VENDOR_ATMEL_MDB041D:
14054 case FLASH_5761VENDOR_ATMEL_MDB081D:
14055 case FLASH_5761VENDOR_ATMEL_MDB161D:
14056 tp->nvram_jedecnum = JEDEC_ATMEL;
14057 tg3_flag_set(tp, NVRAM_BUFFERED);
14058 tg3_flag_set(tp, FLASH);
14059 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14060 tp->nvram_pagesize = 256;
14061 break;
14062 case FLASH_5761VENDOR_ST_A_M45PE20:
14063 case FLASH_5761VENDOR_ST_A_M45PE40:
14064 case FLASH_5761VENDOR_ST_A_M45PE80:
14065 case FLASH_5761VENDOR_ST_A_M45PE16:
14066 case FLASH_5761VENDOR_ST_M_M45PE20:
14067 case FLASH_5761VENDOR_ST_M_M45PE40:
14068 case FLASH_5761VENDOR_ST_M_M45PE80:
14069 case FLASH_5761VENDOR_ST_M_M45PE16:
14070 tp->nvram_jedecnum = JEDEC_ST;
14071 tg3_flag_set(tp, NVRAM_BUFFERED);
14072 tg3_flag_set(tp, FLASH);
14073 tp->nvram_pagesize = 256;
14074 break;
14075 }
14076
14077 if (protect) {
14078 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14079 } else {
14080 switch (nvcfg1) {
14081 case FLASH_5761VENDOR_ATMEL_ADB161D:
14082 case FLASH_5761VENDOR_ATMEL_MDB161D:
14083 case FLASH_5761VENDOR_ST_A_M45PE16:
14084 case FLASH_5761VENDOR_ST_M_M45PE16:
14085 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14086 break;
14087 case FLASH_5761VENDOR_ATMEL_ADB081D:
14088 case FLASH_5761VENDOR_ATMEL_MDB081D:
14089 case FLASH_5761VENDOR_ST_A_M45PE80:
14090 case FLASH_5761VENDOR_ST_M_M45PE80:
14091 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14092 break;
14093 case FLASH_5761VENDOR_ATMEL_ADB041D:
14094 case FLASH_5761VENDOR_ATMEL_MDB041D:
14095 case FLASH_5761VENDOR_ST_A_M45PE40:
14096 case FLASH_5761VENDOR_ST_M_M45PE40:
14097 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14098 break;
14099 case FLASH_5761VENDOR_ATMEL_ADB021D:
14100 case FLASH_5761VENDOR_ATMEL_MDB021D:
14101 case FLASH_5761VENDOR_ST_A_M45PE20:
14102 case FLASH_5761VENDOR_ST_M_M45PE20:
14103 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14104 break;
14105 }
14106 }
14107 }
14108
14109 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14110 {
14111 tp->nvram_jedecnum = JEDEC_ATMEL;
14112 tg3_flag_set(tp, NVRAM_BUFFERED);
14113 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14114 }
14115
14116 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14117 {
14118 u32 nvcfg1;
14119
14120 nvcfg1 = tr32(NVRAM_CFG1);
14121
14122 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14123 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14124 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14125 tp->nvram_jedecnum = JEDEC_ATMEL;
14126 tg3_flag_set(tp, NVRAM_BUFFERED);
14127 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14128
14129 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14130 tw32(NVRAM_CFG1, nvcfg1);
14131 return;
14132 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14133 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14134 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14135 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14136 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14137 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14138 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14139 tp->nvram_jedecnum = JEDEC_ATMEL;
14140 tg3_flag_set(tp, NVRAM_BUFFERED);
14141 tg3_flag_set(tp, FLASH);
14142
14143 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14144 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14145 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14146 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14147 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14148 break;
14149 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14150 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14151 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14152 break;
14153 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14154 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14155 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14156 break;
14157 }
14158 break;
14159 case FLASH_5752VENDOR_ST_M45PE10:
14160 case FLASH_5752VENDOR_ST_M45PE20:
14161 case FLASH_5752VENDOR_ST_M45PE40:
14162 tp->nvram_jedecnum = JEDEC_ST;
14163 tg3_flag_set(tp, NVRAM_BUFFERED);
14164 tg3_flag_set(tp, FLASH);
14165
14166 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14167 case FLASH_5752VENDOR_ST_M45PE10:
14168 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14169 break;
14170 case FLASH_5752VENDOR_ST_M45PE20:
14171 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14172 break;
14173 case FLASH_5752VENDOR_ST_M45PE40:
14174 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14175 break;
14176 }
14177 break;
14178 default:
14179 tg3_flag_set(tp, NO_NVRAM);
14180 return;
14181 }
14182
14183 tg3_nvram_get_pagesize(tp, nvcfg1);
14184 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14185 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14186 }
14187
14189 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14190 {
14191 u32 nvcfg1;
14192
14193 nvcfg1 = tr32(NVRAM_CFG1);
14194
14195 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14196 case FLASH_5717VENDOR_ATMEL_EEPROM:
14197 case FLASH_5717VENDOR_MICRO_EEPROM:
14198 tp->nvram_jedecnum = JEDEC_ATMEL;
14199 tg3_flag_set(tp, NVRAM_BUFFERED);
14200 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14201
14202 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14203 tw32(NVRAM_CFG1, nvcfg1);
14204 return;
14205 case FLASH_5717VENDOR_ATMEL_MDB011D:
14206 case FLASH_5717VENDOR_ATMEL_ADB011B:
14207 case FLASH_5717VENDOR_ATMEL_ADB011D:
14208 case FLASH_5717VENDOR_ATMEL_MDB021D:
14209 case FLASH_5717VENDOR_ATMEL_ADB021B:
14210 case FLASH_5717VENDOR_ATMEL_ADB021D:
14211 case FLASH_5717VENDOR_ATMEL_45USPT:
14212 tp->nvram_jedecnum = JEDEC_ATMEL;
14213 tg3_flag_set(tp, NVRAM_BUFFERED);
14214 tg3_flag_set(tp, FLASH);
14215
14216 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14217 case FLASH_5717VENDOR_ATMEL_MDB021D:
14218 /* Size will be detected later by tg3_get_nvram_size() */
14219 break;
14220 case FLASH_5717VENDOR_ATMEL_ADB021B:
14221 case FLASH_5717VENDOR_ATMEL_ADB021D:
14222 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14223 break;
14224 default:
14225 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14226 break;
14227 }
14228 break;
14229 case FLASH_5717VENDOR_ST_M_M25PE10:
14230 case FLASH_5717VENDOR_ST_A_M25PE10:
14231 case FLASH_5717VENDOR_ST_M_M45PE10:
14232 case FLASH_5717VENDOR_ST_A_M45PE10:
14233 case FLASH_5717VENDOR_ST_M_M25PE20:
14234 case FLASH_5717VENDOR_ST_A_M25PE20:
14235 case FLASH_5717VENDOR_ST_M_M45PE20:
14236 case FLASH_5717VENDOR_ST_A_M45PE20:
14237 case FLASH_5717VENDOR_ST_25USPT:
14238 case FLASH_5717VENDOR_ST_45USPT:
14239 tp->nvram_jedecnum = JEDEC_ST;
14240 tg3_flag_set(tp, NVRAM_BUFFERED);
14241 tg3_flag_set(tp, FLASH);
14242
14243 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14244 case FLASH_5717VENDOR_ST_M_M25PE20:
14245 case FLASH_5717VENDOR_ST_M_M45PE20:
14246 /* Size will be detected later by tg3_get_nvram_size() */
14247 break;
14248 case FLASH_5717VENDOR_ST_A_M25PE20:
14249 case FLASH_5717VENDOR_ST_A_M45PE20:
14250 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14251 break;
14252 default:
14253 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14254 break;
14255 }
14256 break;
14257 default:
14258 tg3_flag_set(tp, NO_NVRAM);
14259 return;
14260 }
14261
14262 tg3_nvram_get_pagesize(tp, nvcfg1);
14263 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14264 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14265 }
14266
14267 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14268 {
14269 u32 nvcfg1, nvmpinstrp;
14270
14271 nvcfg1 = tr32(NVRAM_CFG1);
14272 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14273
14274 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14275 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14276 tg3_flag_set(tp, NO_NVRAM);
14277 return;
14278 }
14279
14280 switch (nvmpinstrp) {
14281 case FLASH_5762_EEPROM_HD:
14282 nvmpinstrp = FLASH_5720_EEPROM_HD;
14283 break;
14284 case FLASH_5762_EEPROM_LD:
14285 nvmpinstrp = FLASH_5720_EEPROM_LD;
14286 break;
14287 case FLASH_5720VENDOR_M_ST_M45PE20:
14288 /* This pinstrap supports multiple sizes, so force it
14289 * to read the actual size from location 0xf0.
14290 */
14291 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14292 break;
14293 }
14294 }
14295
14296 switch (nvmpinstrp) {
14297 case FLASH_5720_EEPROM_HD:
14298 case FLASH_5720_EEPROM_LD:
14299 tp->nvram_jedecnum = JEDEC_ATMEL;
14300 tg3_flag_set(tp, NVRAM_BUFFERED);
14301
14302 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14303 tw32(NVRAM_CFG1, nvcfg1);
14304 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14305 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14306 else
14307 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14308 return;
14309 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14310 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14311 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14312 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14313 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14314 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14315 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14316 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14317 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14318 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14319 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14320 case FLASH_5720VENDOR_ATMEL_45USPT:
14321 tp->nvram_jedecnum = JEDEC_ATMEL;
14322 tg3_flag_set(tp, NVRAM_BUFFERED);
14323 tg3_flag_set(tp, FLASH);
14324
14325 switch (nvmpinstrp) {
14326 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14327 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14328 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14329 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14330 break;
14331 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14332 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14333 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14334 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14335 break;
14336 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14337 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14338 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14339 break;
14340 default:
14341 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14342 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14343 break;
14344 }
14345 break;
14346 case FLASH_5720VENDOR_M_ST_M25PE10:
14347 case FLASH_5720VENDOR_M_ST_M45PE10:
14348 case FLASH_5720VENDOR_A_ST_M25PE10:
14349 case FLASH_5720VENDOR_A_ST_M45PE10:
14350 case FLASH_5720VENDOR_M_ST_M25PE20:
14351 case FLASH_5720VENDOR_M_ST_M45PE20:
14352 case FLASH_5720VENDOR_A_ST_M25PE20:
14353 case FLASH_5720VENDOR_A_ST_M45PE20:
14354 case FLASH_5720VENDOR_M_ST_M25PE40:
14355 case FLASH_5720VENDOR_M_ST_M45PE40:
14356 case FLASH_5720VENDOR_A_ST_M25PE40:
14357 case FLASH_5720VENDOR_A_ST_M45PE40:
14358 case FLASH_5720VENDOR_M_ST_M25PE80:
14359 case FLASH_5720VENDOR_M_ST_M45PE80:
14360 case FLASH_5720VENDOR_A_ST_M25PE80:
14361 case FLASH_5720VENDOR_A_ST_M45PE80:
14362 case FLASH_5720VENDOR_ST_25USPT:
14363 case FLASH_5720VENDOR_ST_45USPT:
14364 tp->nvram_jedecnum = JEDEC_ST;
14365 tg3_flag_set(tp, NVRAM_BUFFERED);
14366 tg3_flag_set(tp, FLASH);
14367
14368 switch (nvmpinstrp) {
14369 case FLASH_5720VENDOR_M_ST_M25PE20:
14370 case FLASH_5720VENDOR_M_ST_M45PE20:
14371 case FLASH_5720VENDOR_A_ST_M25PE20:
14372 case FLASH_5720VENDOR_A_ST_M45PE20:
14373 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14374 break;
14375 case FLASH_5720VENDOR_M_ST_M25PE40:
14376 case FLASH_5720VENDOR_M_ST_M45PE40:
14377 case FLASH_5720VENDOR_A_ST_M25PE40:
14378 case FLASH_5720VENDOR_A_ST_M45PE40:
14379 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14380 break;
14381 case FLASH_5720VENDOR_M_ST_M25PE80:
14382 case FLASH_5720VENDOR_M_ST_M45PE80:
14383 case FLASH_5720VENDOR_A_ST_M25PE80:
14384 case FLASH_5720VENDOR_A_ST_M45PE80:
14385 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14386 break;
14387 default:
14388 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14389 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14390 break;
14391 }
14392 break;
14393 default:
14394 tg3_flag_set(tp, NO_NVRAM);
14395 return;
14396 }
14397
14398 tg3_nvram_get_pagesize(tp, nvcfg1);
14399 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14400 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14401
14402 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14403 u32 val;
14404
14405 if (tg3_nvram_read(tp, 0, &val))
14406 return;
14407
14408 if (val != TG3_EEPROM_MAGIC &&
14409 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14410 tg3_flag_set(tp, NO_NVRAM);
14411 }
14412 }
14413
14414 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14415 static void tg3_nvram_init(struct tg3 *tp)
14416 {
14417 if (tg3_flag(tp, IS_SSB_CORE)) {
14418 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14419 tg3_flag_clear(tp, NVRAM);
14420 tg3_flag_clear(tp, NVRAM_BUFFERED);
14421 tg3_flag_set(tp, NO_NVRAM);
14422 return;
14423 }
14424
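/* Reset the EEPROM access state machine and program the default
 * clock period before probing any NVRAM device.
 */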
14425 tw32_f(GRC_EEPROM_ADDR,
14426 (EEPROM_ADDR_FSM_RESET |
14427 (EEPROM_DEFAULT_CLOCK_PERIOD <<
14428 EEPROM_ADDR_CLKPERD_SHIFT)));
14429
14430 msleep(1);
14431
14432 /* Enable serial EEPROM accesses. */
14433 tw32_f(GRC_LOCAL_CTRL,
14434 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14435 udelay(100);
14436
14437 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14438 tg3_asic_rev(tp) != ASIC_REV_5701) {
14439 tg3_flag_set(tp, NVRAM);
14440
14441 if (tg3_nvram_lock(tp)) {
14442 netdev_warn(tp->dev,
14443 "Cannot get nvram lock, %s failed\n",
14444 __func__);
14445 return;
14446 }
14447 tg3_enable_nvram_access(tp);
14448
14449 tp->nvram_size = 0;
14450
14451 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14452 tg3_get_5752_nvram_info(tp);
14453 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14454 tg3_get_5755_nvram_info(tp);
14455 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14456 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14457 tg3_asic_rev(tp) == ASIC_REV_5785)
14458 tg3_get_5787_nvram_info(tp);
14459 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14460 tg3_get_5761_nvram_info(tp);
14461 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14462 tg3_get_5906_nvram_info(tp);
14463 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14464 tg3_flag(tp, 57765_CLASS))
14465 tg3_get_57780_nvram_info(tp);
14466 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14467 tg3_asic_rev(tp) == ASIC_REV_5719)
14468 tg3_get_5717_nvram_info(tp);
14469 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14470 tg3_asic_rev(tp) == ASIC_REV_5762)
14471 tg3_get_5720_nvram_info(tp);
14472 else
14473 tg3_get_nvram_info(tp);
14474
14475 if (tp->nvram_size == 0)
14476 tg3_get_nvram_size(tp);
14477
14478 tg3_disable_nvram_access(tp);
14479 tg3_nvram_unlock(tp);
14480
14481 } else {
14482 tg3_flag_clear(tp, NVRAM);
14483 tg3_flag_clear(tp, NVRAM_BUFFERED);
14484
14485 tg3_get_eeprom_size(tp);
14486 }
14487 }
14488
14489 struct subsys_tbl_ent {
14490 u16 subsys_vendor, subsys_devid;
14491 u32 phy_id;
14492 };
14493
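/* Maps the PCI subsystem vendor/device IDs of known OEM boards to
 * the PHY fitted on them; used by tg3_phy_probe() as a last resort
 * when neither the PHY registers nor the EEPROM yield a usable ID.
 */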
14494 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14495 /* Broadcom boards. */
14496 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14497 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14498 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14499 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14500 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14501 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14502 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14503 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14504 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14505 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14506 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14507 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14508 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14509 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14510 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14511 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14512 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14513 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14514 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14515 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14516 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14517 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
14518
14519 /* 3com boards. */
14520 { TG3PCI_SUBVENDOR_ID_3COM,
14521 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
14522 { TG3PCI_SUBVENDOR_ID_3COM,
14523 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
14524 { TG3PCI_SUBVENDOR_ID_3COM,
14525 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
14526 { TG3PCI_SUBVENDOR_ID_3COM,
14527 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
14528 { TG3PCI_SUBVENDOR_ID_3COM,
14529 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
14530
14531 /* DELL boards. */
14532 { TG3PCI_SUBVENDOR_ID_DELL,
14533 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
14534 { TG3PCI_SUBVENDOR_ID_DELL,
14535 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
14536 { TG3PCI_SUBVENDOR_ID_DELL,
14537 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
14538 { TG3PCI_SUBVENDOR_ID_DELL,
14539 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
14540
14541 /* Compaq boards. */
14542 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14543 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
14544 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14545 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
14546 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14547 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
14548 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14549 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
14550 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14551 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
14552
14553 /* IBM boards. */
14554 { TG3PCI_SUBVENDOR_ID_IBM,
14555 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
14556 };
14557
14558 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14559 {
14560 int i;
14561
14562 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14563 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14564 tp->pdev->subsystem_vendor) &&
14565 (subsys_id_to_phy_id[i].subsys_devid ==
14566 tp->pdev->subsystem_device))
14567 return &subsys_id_to_phy_id[i];
14568 }
14569 return NULL;
14570 }
14571
14572 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14573 {
14574 u32 val;
14575
14576 tp->phy_id = TG3_PHY_ID_INVALID;
14577 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14578
14579 /* Assume an onboard, WOL-capable device by default. */
14580 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14581 tg3_flag_set(tp, WOL_CAP);
14582
14583 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14584 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14585 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14586 tg3_flag_set(tp, IS_NIC);
14587 }
14588 val = tr32(VCPU_CFGSHDW);
14589 if (val & VCPU_CFGSHDW_ASPM_DBNC)
14590 tg3_flag_set(tp, ASPM_WORKAROUND);
14591 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14592 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14593 tg3_flag_set(tp, WOL_ENABLE);
14594 device_set_wakeup_enable(&tp->pdev->dev, true);
14595 }
14596 goto done;
14597 }
14598
14599 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
14600 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14601 u32 nic_cfg, led_cfg;
14602 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
14603 int eeprom_phy_serdes = 0;
14604
14605 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14606 tp->nic_sram_data_cfg = nic_cfg;
14607
14608 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14609 ver >>= NIC_SRAM_DATA_VER_SHIFT;
14610 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14611 tg3_asic_rev(tp) != ASIC_REV_5701 &&
14612 tg3_asic_rev(tp) != ASIC_REV_5703 &&
14613 (ver > 0) && (ver < 0x100))
14614 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14615
14616 if (tg3_asic_rev(tp) == ASIC_REV_5785)
14617 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14618
14619 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14620 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14621 eeprom_phy_serdes = 1;
14622
14623 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14624 if (nic_phy_id != 0) {
14625 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14626 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14627
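/* Reassemble the driver's 32-bit PHY ID from the two 16-bit
 * halves stored in SRAM, using the same packing as the
 * MII_PHYSID1/MII_PHYSID2 merge in tg3_phy_probe().
 */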
14628 eeprom_phy_id = (id1 >> 16) << 10;
14629 eeprom_phy_id |= (id2 & 0xfc00) << 16;
14630 eeprom_phy_id |= (id2 & 0x03ff) << 0;
14631 } else
14632 eeprom_phy_id = 0;
14633
14634 tp->phy_id = eeprom_phy_id;
14635 if (eeprom_phy_serdes) {
14636 if (!tg3_flag(tp, 5705_PLUS))
14637 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14638 else
14639 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14640 }
14641
14642 if (tg3_flag(tp, 5750_PLUS))
14643 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14644 SHASTA_EXT_LED_MODE_MASK);
14645 else
14646 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14647
14648 switch (led_cfg) {
14649 default:
14650 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14651 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14652 break;
14653
14654 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14655 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14656 break;
14657
14658 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14659 tp->led_ctrl = LED_CTRL_MODE_MAC;
14660
14661 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
14662 * read, as happens with some older 5700/5701 bootcode.
14663 */
14664 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
14665 tg3_asic_rev(tp) == ASIC_REV_5701)
14666 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14667
14668 break;
14669
14670 case SHASTA_EXT_LED_SHARED:
14671 tp->led_ctrl = LED_CTRL_MODE_SHARED;
14672 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
14673 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
14674 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14675 LED_CTRL_MODE_PHY_2);
14676 break;
14677
14678 case SHASTA_EXT_LED_MAC:
14679 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14680 break;
14681
14682 case SHASTA_EXT_LED_COMBO:
14683 tp->led_ctrl = LED_CTRL_MODE_COMBO;
14684 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
14685 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14686 LED_CTRL_MODE_PHY_2);
14687 break;
14688
14689 }
14690
14691 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
14692 tg3_asic_rev(tp) == ASIC_REV_5701) &&
14693 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14694 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14695
14696 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
14697 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14698
14699 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14700 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14701 if ((tp->pdev->subsystem_vendor ==
14702 PCI_VENDOR_ID_ARIMA) &&
14703 (tp->pdev->subsystem_device == 0x205a ||
14704 tp->pdev->subsystem_device == 0x2063))
14705 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14706 } else {
14707 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14708 tg3_flag_set(tp, IS_NIC);
14709 }
14710
14711 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14712 tg3_flag_set(tp, ENABLE_ASF);
14713 if (tg3_flag(tp, 5750_PLUS))
14714 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
14715 }
14716
14717 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14718 tg3_flag(tp, 5750_PLUS))
14719 tg3_flag_set(tp, ENABLE_APE);
14720
14721 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14722 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14723 tg3_flag_clear(tp, WOL_CAP);
14724
14725 if (tg3_flag(tp, WOL_CAP) &&
14726 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14727 tg3_flag_set(tp, WOL_ENABLE);
14728 device_set_wakeup_enable(&tp->pdev->dev, true);
14729 }
14730
14731 if (cfg2 & (1 << 17))
14732 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
14733
14734 /* SerDes signal pre-emphasis in register 0x590 is set by
14735 * the bootcode if bit 18 is set. */
14736 if (cfg2 & (1 << 18))
14737 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
14738
14739 if ((tg3_flag(tp, 57765_PLUS) ||
14740 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
14741 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
14742 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
14743 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
14744
14745 if (tg3_flag(tp, PCI_EXPRESS)) {
14746 u32 cfg3;
14747
14748 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
14749 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
14750 !tg3_flag(tp, 57765_PLUS) &&
14751 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
14752 tg3_flag_set(tp, ASPM_WORKAROUND);
14753 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
14754 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
14755 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
14756 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
14757 }
14758
14759 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
14760 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
14761 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
14762 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
14763 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
14764 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
14765 }
14766 done:
14767 if (tg3_flag(tp, WOL_CAP))
14768 device_set_wakeup_enable(&tp->pdev->dev,
14769 tg3_flag(tp, WOL_ENABLE));
14770 else
14771 device_set_wakeup_capable(&tp->pdev->dev, false);
14772 }
14773
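/* Read one 32-bit word from the APE OTP region. OTP addresses appear
 * to be in 8-byte granules (hence offset * 8); the read command is
 * polled for completion for up to roughly 1 ms.
 */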
14774 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
14775 {
14776 int i, err;
14777 u32 val2, off = offset * 8;
14778
14779 err = tg3_nvram_lock(tp);
14780 if (err)
14781 return err;
14782
14783 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
14784 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
14785 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
14786 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
14787 udelay(10);
14788
14789 for (i = 0; i < 100; i++) {
14790 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
14791 if (val2 & APE_OTP_STATUS_CMD_DONE) {
14792 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
14793 break;
14794 }
14795 udelay(10);
14796 }
14797
14798 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
14799
14800 tg3_nvram_unlock(tp);
14801 if (val2 & APE_OTP_STATUS_CMD_DONE)
14802 return 0;
14803
14804 return -EBUSY;
14805 }
14806
14807 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14808 {
14809 int i;
14810 u32 val;
14811
14812 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14813 tw32(OTP_CTRL, cmd);
14814
14815 /* Wait for up to 1 ms for the command to execute. */
14816 for (i = 0; i < 100; i++) {
14817 val = tr32(OTP_STATUS);
14818 if (val & OTP_STATUS_CMD_DONE)
14819 break;
14820 udelay(10);
14821 }
14822
14823 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14824 }
14825
14826 /* Read the gphy configuration from the OTP region of the chip. The gphy
14827 * configuration is a 32-bit value that straddles the alignment boundary.
14828 * We do two 32-bit reads and then shift and merge the results.
14829 */
14830 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
14831 {
14832 u32 bhalf_otp, thalf_otp;
14833
14834 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
14835
14836 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
14837 return 0;
14838
14839 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
14840
14841 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14842 return 0;
14843
14844 thalf_otp = tr32(OTP_READ_DATA);
14845
14846 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
14847
14848 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14849 return 0;
14850
14851 bhalf_otp = tr32(OTP_READ_DATA);
14852
14853 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
14854 }
14855
14856 static void tg3_phy_init_link_config(struct tg3 *tp)
14857 {
14858 u32 adv = ADVERTISED_Autoneg;
14859
14860 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14861 adv |= ADVERTISED_1000baseT_Half |
14862 ADVERTISED_1000baseT_Full;
14863
14864 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14865 adv |= ADVERTISED_100baseT_Half |
14866 ADVERTISED_100baseT_Full |
14867 ADVERTISED_10baseT_Half |
14868 ADVERTISED_10baseT_Full |
14869 ADVERTISED_TP;
14870 else
14871 adv |= ADVERTISED_FIBRE;
14872
14873 tp->link_config.advertising = adv;
14874 tp->link_config.speed = SPEED_UNKNOWN;
14875 tp->link_config.duplex = DUPLEX_UNKNOWN;
14876 tp->link_config.autoneg = AUTONEG_ENABLE;
14877 tp->link_config.active_speed = SPEED_UNKNOWN;
14878 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
14879
14880 tp->old_link = -1;
14881 }
14882
14883 static int tg3_phy_probe(struct tg3 *tp)
14884 {
14885 u32 hw_phy_id_1, hw_phy_id_2;
14886 u32 hw_phy_id, hw_phy_id_masked;
14887 int err;
14888
14889 /* flow control autonegotiation is default behavior */
14890 tg3_flag_set(tp, PAUSE_AUTONEG);
14891 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14892
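/* With APE firmware present, PHY (MDIO) access is arbitrated through
 * a per-port APE lock; pick the lock matching our PCI function.
 */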
14893 if (tg3_flag(tp, ENABLE_APE)) {
14894 switch (tp->pci_fn) {
14895 case 0:
14896 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
14897 break;
14898 case 1:
14899 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
14900 break;
14901 case 2:
14902 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
14903 break;
14904 case 3:
14905 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
14906 break;
14907 }
14908 }
14909
14910 if (!tg3_flag(tp, ENABLE_ASF) &&
14911 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14912 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14913 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
14914 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
14915
14916 if (tg3_flag(tp, USE_PHYLIB))
14917 return tg3_phy_init(tp);
14918
14919 /* Reading the PHY ID register can conflict with ASF
14920 * firmware access to the PHY hardware.
14921 */
14922 err = 0;
14923 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
14924 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
14925 } else {
14926 /* Now read the physical PHY_ID from the chip and verify
14927 * that it is sane. If it doesn't look good, we fall back
14928 * to the PHY ID recorded in the EEPROM area or, failing
14929 * that, to the hard-coded subsystem-ID table below.
14930 */
14931 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
14932 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
14933
14934 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
14935 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
14936 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
14937
14938 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
14939 }
14940
14941 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
14942 tp->phy_id = hw_phy_id;
14943 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
14944 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14945 else
14946 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
14947 } else {
14948 if (tp->phy_id != TG3_PHY_ID_INVALID) {
14949 /* Do nothing, phy ID already set up in
14950 * tg3_get_eeprom_hw_cfg().
14951 */
14952 } else {
14953 struct subsys_tbl_ent *p;
14954
14955 /* No eeprom signature? Try the hardcoded
14956 * subsys device table.
14957 */
14958 p = tg3_lookup_by_subsys(tp);
14959 if (p) {
14960 tp->phy_id = p->phy_id;
14961 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
14962 /* So far we have seen the IDs 0xbc050cd0,
14963 * 0xbc050f80 and 0xbc050c30 on devices
14964 * connected to a BCM4785, and there are
14965 * probably more. For now, just assume that
14966 * the PHY is supported when it is connected
14967 * to an SSB core.
14968 */
14969 return -ENODEV;
14970 }
14971
14972 if (!tp->phy_id ||
14973 tp->phy_id == TG3_PHY_ID_BCM8002)
14974 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14975 }
14976 }
14977
14978 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14979 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
14980 tg3_asic_rev(tp) == ASIC_REV_5720 ||
14981 tg3_asic_rev(tp) == ASIC_REV_57766 ||
14982 tg3_asic_rev(tp) == ASIC_REV_5762 ||
14983 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
14984 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
14985 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
14986 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
14987 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
14988
14989 tg3_phy_init_link_config(tp);
14990
14991 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
14992 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14993 !tg3_flag(tp, ENABLE_APE) &&
14994 !tg3_flag(tp, ENABLE_ASF)) {
14995 u32 bmsr, dummy;
14996
14997 tg3_readphy(tp, MII_BMSR, &bmsr);
14998 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
14999 (bmsr & BMSR_LSTATUS))
15000 goto skip_phy_reset;
15001
15002 err = tg3_phy_reset(tp);
15003 if (err)
15004 return err;
15005
15006 tg3_phy_set_wirespeed(tp);
15007
15008 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15009 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15010 tp->link_config.flowctrl);
15011
15012 tg3_writephy(tp, MII_BMCR,
15013 BMCR_ANENABLE | BMCR_ANRESTART);
15014 }
15015 }
15016
15017 skip_phy_reset:
15018 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
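/* The 5401 DSP setup is deliberately run twice; presumably a single
 * pass does not always take on this PHY.
 */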
15019 err = tg3_init_5401phy_dsp(tp);
15020 if (err)
15021 return err;
15022
15023 err = tg3_init_5401phy_dsp(tp);
15024 }
15025
15026 return err;
15027 }
15028
15029 static void tg3_read_vpd(struct tg3 *tp)
15030 {
15031 u8 *vpd_data;
15032 unsigned int block_end, rosize, len;
15033 u32 vpdlen;
15034 int j, i = 0;
15035
15036 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15037 if (!vpd_data)
15038 goto out_no_vpd;
15039
15040 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15041 if (i < 0)
15042 goto out_not_found;
15043
15044 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15045 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15046 i += PCI_VPD_LRDT_TAG_SIZE;
15047
15048 if (block_end > vpdlen)
15049 goto out_not_found;
15050
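/* "1028" is Dell's PCI vendor ID rendered in ASCII; only on such
 * boards is the VENDOR0 keyword below assumed to carry a firmware
 * version string.
 */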
15051 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15052 PCI_VPD_RO_KEYWORD_MFR_ID);
15053 if (j > 0) {
15054 len = pci_vpd_info_field_size(&vpd_data[j]);
15055
15056 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15057 if (j + len > block_end || len != 4 ||
15058 memcmp(&vpd_data[j], "1028", 4))
15059 goto partno;
15060
15061 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15062 PCI_VPD_RO_KEYWORD_VENDOR0);
15063 if (j < 0)
15064 goto partno;
15065
15066 len = pci_vpd_info_field_size(&vpd_data[j]);
15067
15068 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15069 if (j + len > block_end)
15070 goto partno;
15071
15072 if (len >= sizeof(tp->fw_ver))
15073 len = sizeof(tp->fw_ver) - 1;
15074 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15075 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15076 &vpd_data[j]);
15077 }
15078
15079 partno:
15080 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15081 PCI_VPD_RO_KEYWORD_PARTNO);
15082 if (i < 0)
15083 goto out_not_found;
15084
15085 len = pci_vpd_info_field_size(&vpd_data[i]);
15086
15087 i += PCI_VPD_INFO_FLD_HDR_SIZE;
15088 if (len > TG3_BPN_SIZE ||
15089 (len + i) > vpdlen)
15090 goto out_not_found;
15091
15092 memcpy(tp->board_part_number, &vpd_data[i], len);
15093
15094 out_not_found:
15095 kfree(vpd_data);
15096 if (tp->board_part_number[0])
15097 return;
15098
15099 out_no_vpd:
15100 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15101 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15102 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15103 strcpy(tp->board_part_number, "BCM5717");
15104 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15105 strcpy(tp->board_part_number, "BCM5718");
15106 else
15107 goto nomatch;
15108 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15109 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15110 strcpy(tp->board_part_number, "BCM57780");
15111 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15112 strcpy(tp->board_part_number, "BCM57760");
15113 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15114 strcpy(tp->board_part_number, "BCM57790");
15115 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15116 strcpy(tp->board_part_number, "BCM57788");
15117 else
15118 goto nomatch;
15119 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15120 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15121 strcpy(tp->board_part_number, "BCM57761");
15122 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15123 strcpy(tp->board_part_number, "BCM57765");
15124 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15125 strcpy(tp->board_part_number, "BCM57781");
15126 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15127 strcpy(tp->board_part_number, "BCM57785");
15128 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15129 strcpy(tp->board_part_number, "BCM57791");
15130 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15131 strcpy(tp->board_part_number, "BCM57795");
15132 else
15133 goto nomatch;
15134 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15135 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15136 strcpy(tp->board_part_number, "BCM57762");
15137 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15138 strcpy(tp->board_part_number, "BCM57766");
15139 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15140 strcpy(tp->board_part_number, "BCM57782");
15141 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15142 strcpy(tp->board_part_number, "BCM57786");
15143 else
15144 goto nomatch;
15145 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15146 strcpy(tp->board_part_number, "BCM95906");
15147 } else {
15148 nomatch:
15149 strcpy(tp->board_part_number, "none");
15150 }
15151 }
15152
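/* A firmware image in NVRAM is considered valid when its first word
 * carries 0x0c000000 in the top six bits and the following word is
 * zero; tg3_read_bc_ver() applies the same test inline.
 */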
15153 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15154 {
15155 u32 val;
15156
15157 if (tg3_nvram_read(tp, offset, &val) ||
15158 (val & 0xfc000000) != 0x0c000000 ||
15159 tg3_nvram_read(tp, offset + 4, &val) ||
15160 val != 0)
15161 return 0;
15162
15163 return 1;
15164 }
15165
15166 static void tg3_read_bc_ver(struct tg3 *tp)
15167 {
15168 u32 val, offset, start, ver_offset;
15169 int i, dst_off;
15170 bool newver = false;
15171
15172 if (tg3_nvram_read(tp, 0xc, &offset) ||
15173 tg3_nvram_read(tp, 0x4, &start))
15174 return;
15175
15176 offset = tg3_nvram_logical_addr(tp, offset);
15177
15178 if (tg3_nvram_read(tp, offset, &val))
15179 return;
15180
15181 if ((val & 0xfc000000) == 0x0c000000) {
15182 if (tg3_nvram_read(tp, offset + 4, &val))
15183 return;
15184
15185 if (val == 0)
15186 newver = true;
15187 }
15188
15189 dst_off = strlen(tp->fw_ver);
15190
15191 if (newver) {
15192 if (TG3_VER_SIZE - dst_off < 16 ||
15193 tg3_nvram_read(tp, offset + 8, &ver_offset))
15194 return;
15195
15196 offset = offset + ver_offset - start;
15197 for (i = 0; i < 16; i += 4) {
15198 __be32 v;
15199 if (tg3_nvram_read_be32(tp, offset + i, &v))
15200 return;
15201
15202 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15203 }
15204 } else {
15205 u32 major, minor;
15206
15207 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15208 return;
15209
15210 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15211 TG3_NVM_BCVER_MAJSFT;
15212 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15213 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15214 "v%d.%02d", major, minor);
15215 }
15216 }
15217
15218 static void tg3_read_hwsb_ver(struct tg3 *tp)
15219 {
15220 u32 val, major, minor;
15221
15222 /* Use native endian representation */
15223 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15224 return;
15225
15226 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15227 TG3_NVM_HWSB_CFG1_MAJSFT;
15228 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15229 TG3_NVM_HWSB_CFG1_MINSFT;
15230
15231 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15232 }
15233
15234 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15235 {
15236 u32 offset, major, minor, build;
15237
15238 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15239
15240 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15241 return;
15242
15243 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15244 case TG3_EEPROM_SB_REVISION_0:
15245 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15246 break;
15247 case TG3_EEPROM_SB_REVISION_2:
15248 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15249 break;
15250 case TG3_EEPROM_SB_REVISION_3:
15251 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15252 break;
15253 case TG3_EEPROM_SB_REVISION_4:
15254 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15255 break;
15256 case TG3_EEPROM_SB_REVISION_5:
15257 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15258 break;
15259 case TG3_EEPROM_SB_REVISION_6:
15260 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15261 break;
15262 default:
15263 return;
15264 }
15265
15266 if (tg3_nvram_read(tp, offset, &val))
15267 return;
15268
15269 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15270 TG3_EEPROM_SB_EDH_BLD_SHFT;
15271 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15272 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15273 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
15274
15275 if (minor > 99 || build > 26)
15276 return;
15277
15278 offset = strlen(tp->fw_ver);
15279 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15280 " v%d.%02d", major, minor);
15281
15282 if (build > 0) {
15283 offset = strlen(tp->fw_ver);
15284 if (offset < TG3_VER_SIZE - 1)
15285 tp->fw_ver[offset] = 'a' + build - 1;
15286 }
15287 }
15288
15289 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15290 {
15291 u32 val, offset, start;
15292 int i, vlen;
15293
15294 for (offset = TG3_NVM_DIR_START;
15295 offset < TG3_NVM_DIR_END;
15296 offset += TG3_NVM_DIRENT_SIZE) {
15297 if (tg3_nvram_read(tp, offset, &val))
15298 return;
15299
15300 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15301 break;
15302 }
15303
15304 if (offset == TG3_NVM_DIR_END)
15305 return;
15306
15307 if (!tg3_flag(tp, 5705_PLUS))
15308 start = 0x08000000;
15309 else if (tg3_nvram_read(tp, offset - 4, &start))
15310 return;
15311
15312 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15313 !tg3_fw_img_is_valid(tp, offset) ||
15314 tg3_nvram_read(tp, offset + 8, &val))
15315 return;
15316
15317 offset += val - start;
15318
15319 vlen = strlen(tp->fw_ver);
15320
15321 tp->fw_ver[vlen++] = ',';
15322 tp->fw_ver[vlen++] = ' ';
15323
15324 for (i = 0; i < 4; i++) {
15325 __be32 v;
15326 if (tg3_nvram_read_be32(tp, offset, &v))
15327 return;
15328
15329 offset += sizeof(v);
15330
15331 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15332 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15333 break;
15334 }
15335
15336 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15337 vlen += sizeof(v);
15338 }
15339 }
15340
15341 static void tg3_probe_ncsi(struct tg3 *tp)
15342 {
15343 u32 apedata;
15344
15345 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15346 if (apedata != APE_SEG_SIG_MAGIC)
15347 return;
15348
15349 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15350 if (!(apedata & APE_FW_STATUS_READY))
15351 return;
15352
15353 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15354 tg3_flag_set(tp, APE_HAS_NCSI);
15355 }
15356
15357 static void tg3_read_dash_ver(struct tg3 *tp)
15358 {
15359 int vlen;
15360 u32 apedata;
15361 char *fwtype;
15362
15363 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15364
15365 if (tg3_flag(tp, APE_HAS_NCSI))
15366 fwtype = "NCSI";
15367 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15368 fwtype = "SMASH";
15369 else
15370 fwtype = "DASH";
15371
15372 vlen = strlen(tp->fw_ver);
15373
15374 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15375 fwtype,
15376 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15377 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15378 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15379 (apedata & APE_FW_VERSION_BLDMSK));
15380 }
15381
15382 static void tg3_read_otp_ver(struct tg3 *tp)
15383 {
15384 u32 val, val2;
15385
15386 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15387 return;
15388
15389 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15390 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15391 TG3_OTP_MAGIC0_VALID(val)) {
15392 u64 val64 = (u64) val << 32 | val2;
15393 u32 ver = 0;
15394 int i, vlen;
15395
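/* Scan the low seven bytes of the OTP doubleword from LSB upward and
 * keep the last non-zero byte as the version number.
 */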
15396 for (i = 0; i < 7; i++) {
15397 if ((val64 & 0xff) == 0)
15398 break;
15399 ver = val64 & 0xff;
15400 val64 >>= 8;
15401 }
15402 vlen = strlen(tp->fw_ver);
15403 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15404 }
15405 }
15406
15407 static void tg3_read_fw_ver(struct tg3 *tp)
15408 {
15409 u32 val;
15410 bool vpd_vers = false;
15411
15412 if (tp->fw_ver[0] != 0)
15413 vpd_vers = true;
15414
15415 if (tg3_flag(tp, NO_NVRAM)) {
15416 strcat(tp->fw_ver, "sb");
15417 tg3_read_otp_ver(tp);
15418 return;
15419 }
15420
15421 if (tg3_nvram_read(tp, 0, &val))
15422 return;
15423
15424 if (val == TG3_EEPROM_MAGIC)
15425 tg3_read_bc_ver(tp);
15426 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15427 tg3_read_sb_ver(tp, val);
15428 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15429 tg3_read_hwsb_ver(tp);
15430
15431 if (tg3_flag(tp, ENABLE_ASF)) {
15432 if (tg3_flag(tp, ENABLE_APE)) {
15433 tg3_probe_ncsi(tp);
15434 if (!vpd_vers)
15435 tg3_read_dash_ver(tp);
15436 } else if (!vpd_vers) {
15437 tg3_read_mgmtfw_ver(tp);
15438 }
15439 }
15440
15441 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15442 }
15443
15444 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15445 {
15446 if (tg3_flag(tp, LRG_PROD_RING_CAP))
15447 return TG3_RX_RET_MAX_SIZE_5717;
15448 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15449 return TG3_RX_RET_MAX_SIZE_5700;
15450 else
15451 return TG3_RX_RET_MAX_SIZE_5705;
15452 }
15453
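/* Host bridges known to reorder posted PCI writes; when one of these
 * is present (and the NIC is not PCI Express), the driver later
 * enables its mailbox write-reorder workaround.
 */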
15454 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15455 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15456 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15457 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15458 { },
15459 };
15460
15461 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15462 {
15463 struct pci_dev *peer;
15464 unsigned int func, devnr = tp->pdev->devfn & ~7;
15465
15466 for (func = 0; func < 8; func++) {
15467 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15468 if (peer && peer != tp->pdev)
15469 break;
15470 pci_dev_put(peer);
15471 }
15472 /* The 5704 can be configured in single-port mode; set peer to
15473 * tp->pdev in that case.
15474 */
15475 if (!peer) {
15476 peer = tp->pdev;
15477 return peer;
15478 }
15479
15480 /*
15481 * We don't need to keep the refcount elevated; there's no way
15482 * to remove one half of this device without removing the other.
15483 */
15484 pci_dev_put(peer);
15485
15486 return peer;
15487 }
15488
15489 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15490 {
15491 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15492 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15493 u32 reg;
15494
15495 /* All devices that use the alternate
15496 * ASIC REV location have a CPMU.
15497 */
15498 tg3_flag_set(tp, CPMU_PRESENT);
15499
15500 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15501 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15502 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15503 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15504 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15505 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15506 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15507 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
15508 reg = TG3PCI_GEN2_PRODID_ASICREV;
15509 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15510 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15511 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15512 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15513 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15514 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15515 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15516 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15517 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15518 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15519 reg = TG3PCI_GEN15_PRODID_ASICREV;
15520 else
15521 reg = TG3PCI_PRODID_ASICREV;
15522
15523 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15524 }
15525
15526 /* Wrong chip ID in 5752 A0. This code can be removed later
15527 * as A0 is not in production.
15528 */
15529 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15530 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15531
15532 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15533 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15534
15535 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15536 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15537 tg3_asic_rev(tp) == ASIC_REV_5720)
15538 tg3_flag_set(tp, 5717_PLUS);
15539
15540 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15541 tg3_asic_rev(tp) == ASIC_REV_57766)
15542 tg3_flag_set(tp, 57765_CLASS);
15543
15544 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15545 tg3_asic_rev(tp) == ASIC_REV_5762)
15546 tg3_flag_set(tp, 57765_PLUS);
15547
15548 /* Intentionally exclude ASIC_REV_5906 */
15549 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15550 tg3_asic_rev(tp) == ASIC_REV_5787 ||
15551 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15552 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15553 tg3_asic_rev(tp) == ASIC_REV_5785 ||
15554 tg3_asic_rev(tp) == ASIC_REV_57780 ||
15555 tg3_flag(tp, 57765_PLUS))
15556 tg3_flag_set(tp, 5755_PLUS);
15557
15558 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15559 tg3_asic_rev(tp) == ASIC_REV_5714)
15560 tg3_flag_set(tp, 5780_CLASS);
15561
15562 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15563 tg3_asic_rev(tp) == ASIC_REV_5752 ||
15564 tg3_asic_rev(tp) == ASIC_REV_5906 ||
15565 tg3_flag(tp, 5755_PLUS) ||
15566 tg3_flag(tp, 5780_CLASS))
15567 tg3_flag_set(tp, 5750_PLUS);
15568
15569 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
15570 tg3_flag(tp, 5750_PLUS))
15571 tg3_flag_set(tp, 5705_PLUS);
15572 }
15573
15574 static bool tg3_10_100_only_device(struct tg3 *tp,
15575 const struct pci_device_id *ent)
15576 {
15577 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15578
15579 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15580 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15581 (tp->phy_flags & TG3_PHYFLG_IS_FET))
15582 return true;
15583
15584 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15585 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15586 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15587 return true;
15588 } else {
15589 return true;
15590 }
15591 }
15592
15593 return false;
15594 }
15595
15596 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15597 {
15598 u32 misc_ctrl_reg;
15599 u32 pci_state_reg, grc_misc_cfg;
15600 u32 val;
15601 u16 pci_cmd;
15602 int err;
15603
15604 /* Force memory write invalidate off. If we leave it on,
15605 * then on 5700_BX chips we have to enable a workaround.
15606 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15607 * to match the cacheline size. The Broadcom driver has this
15608 * workaround but turns MWI off all the time and so never uses
15609 * it. This seems to suggest that the workaround is insufficient.
15610 */
15611 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15612 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15613 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15614
15615 /* Important! -- Make sure register accesses are byteswapped
15616 * correctly. Also, for those chips that require it, make
15617 * sure that indirect register accesses are enabled before
15618 * the first operation.
15619 */
15620 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15621 &misc_ctrl_reg);
15622 tp->misc_host_ctrl |= (misc_ctrl_reg &
15623 MISC_HOST_CTRL_CHIPREV);
15624 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15625 tp->misc_host_ctrl);
15626
15627 tg3_detect_asic_rev(tp, misc_ctrl_reg);
15628
15629 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15630 * we need to disable memory and use config. cycles
15631 * only to access all registers. The 5702/03 chips
15632 * can mistakenly decode the special cycles from the
15633 * ICH chipsets as memory write cycles, causing corruption
15634 * of register and memory space. Only certain ICH bridges
15635 * will drive special cycles with non-zero data during the
15636 * address phase which can fall within the 5703's address
15637 * range. This is not an ICH bug as the PCI spec allows
15638 * non-zero address during special cycles. However, only
15639 * these ICH bridges are known to drive non-zero addresses
15640 * during special cycles.
15641 *
15642 * Since special cycles do not cross PCI bridges, we only
15643 * enable this workaround if the 5703 is on the secondary
15644 * bus of these ICH bridges.
15645 */
15646 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15647 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15648 static struct tg3_dev_id {
15649 u32 vendor;
15650 u32 device;
15651 u32 rev;
15652 } ich_chipsets[] = {
15653 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15654 PCI_ANY_ID },
15655 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15656 PCI_ANY_ID },
15657 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15658 0xa },
15659 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15660 PCI_ANY_ID },
15661 { },
15662 };
15663 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15664 struct pci_dev *bridge = NULL;
15665
15666 while (pci_id->vendor != 0) {
15667 bridge = pci_get_device(pci_id->vendor, pci_id->device,
15668 bridge);
15669 if (!bridge) {
15670 pci_id++;
15671 continue;
15672 }
15673 if (pci_id->rev != PCI_ANY_ID) {
15674 if (bridge->revision > pci_id->rev)
15675 continue;
15676 }
15677 if (bridge->subordinate &&
15678 (bridge->subordinate->number ==
15679 tp->pdev->bus->number)) {
15680 tg3_flag_set(tp, ICH_WORKAROUND);
15681 pci_dev_put(bridge);
15682 break;
15683 }
15684 }
15685 }
15686
15687 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15688 static struct tg3_dev_id {
15689 u32 vendor;
15690 u32 device;
15691 } bridge_chipsets[] = {
15692 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15693 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15694 { },
15695 };
15696 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15697 struct pci_dev *bridge = NULL;
15698
15699 while (pci_id->vendor != 0) {
15700 bridge = pci_get_device(pci_id->vendor,
15701 pci_id->device,
15702 bridge);
15703 if (!bridge) {
15704 pci_id++;
15705 continue;
15706 }
15707 if (bridge->subordinate &&
15708 (bridge->subordinate->number <=
15709 tp->pdev->bus->number) &&
15710 (bridge->subordinate->busn_res.end >=
15711 tp->pdev->bus->number)) {
15712 tg3_flag_set(tp, 5701_DMA_BUG);
15713 pci_dev_put(bridge);
15714 break;
15715 }
15716 }
15717 }
15718
15719 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15720 * DMA addresses > 40-bit. This bridge may have additional
15721 * 57xx devices behind it, in some 4-port NIC designs for example.
15722 * Any tg3 device found behind the bridge will also need the 40-bit
15723 * DMA workaround.
15724 */
15725 if (tg3_flag(tp, 5780_CLASS)) {
15726 tg3_flag_set(tp, 40BIT_DMA_BUG);
15727 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15728 } else {
15729 struct pci_dev *bridge = NULL;
15730
15731 do {
15732 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15733 PCI_DEVICE_ID_SERVERWORKS_EPB,
15734 bridge);
15735 if (bridge && bridge->subordinate &&
15736 (bridge->subordinate->number <=
15737 tp->pdev->bus->number) &&
15738 (bridge->subordinate->busn_res.end >=
15739 tp->pdev->bus->number)) {
15740 tg3_flag_set(tp, 40BIT_DMA_BUG);
15741 pci_dev_put(bridge);
15742 break;
15743 }
15744 } while (bridge);
15745 }
15746
15747 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15748 tg3_asic_rev(tp) == ASIC_REV_5714)
15749 tp->pdev_peer = tg3_find_peer(tp);
15750
15751 /* Determine TSO capabilities */
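/* Roughly: 57765+ parts get HW_TSO_3, 5755+/5906 get HW_TSO_2, early
 * 5750 gets HW_TSO_1 (with a TSO bug on pre-C2 steppings), and the
 * remaining chips fall back to firmware TSO where available; 5719 A0
 * gets none at all due to a HW bug.
 */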
15752 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
15753 ; /* Do nothing. HW bug. */
15754 else if (tg3_flag(tp, 57765_PLUS))
15755 tg3_flag_set(tp, HW_TSO_3);
15756 else if (tg3_flag(tp, 5755_PLUS) ||
15757 tg3_asic_rev(tp) == ASIC_REV_5906)
15758 tg3_flag_set(tp, HW_TSO_2);
15759 else if (tg3_flag(tp, 5750_PLUS)) {
15760 tg3_flag_set(tp, HW_TSO_1);
15761 tg3_flag_set(tp, TSO_BUG);
15762 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
15763 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
15764 tg3_flag_clear(tp, TSO_BUG);
15765 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15766 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15767 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
15768 tg3_flag_set(tp, FW_TSO);
15769 tg3_flag_set(tp, TSO_BUG);
15770 if (tg3_asic_rev(tp) == ASIC_REV_5705)
15771 tp->fw_needed = FIRMWARE_TG3TSO5;
15772 else
15773 tp->fw_needed = FIRMWARE_TG3TSO;
15774 }
15775
15776 /* Selectively allow TSO based on operating conditions */
15777 if (tg3_flag(tp, HW_TSO_1) ||
15778 tg3_flag(tp, HW_TSO_2) ||
15779 tg3_flag(tp, HW_TSO_3) ||
15780 tg3_flag(tp, FW_TSO)) {
15781 /* For firmware TSO, assume ASF is disabled.
15782 * We'll disable TSO later if we discover ASF
15783 * is enabled in tg3_get_eeprom_hw_cfg().
15784 */
15785 tg3_flag_set(tp, TSO_CAPABLE);
15786 } else {
15787 tg3_flag_clear(tp, TSO_CAPABLE);
15788 tg3_flag_clear(tp, TSO_BUG);
15789 tp->fw_needed = NULL;
15790 }
15791
15792 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
15793 tp->fw_needed = FIRMWARE_TG3;
15794
15795 if (tg3_asic_rev(tp) == ASIC_REV_57766)
15796 tp->fw_needed = FIRMWARE_TG357766;
15797
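/* Interrupt capabilities: assume a single vector, then enable MSI on
 * 5750+ (minus known-broken revisions) and MSI-X on 57765+.
 */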
15798 tp->irq_max = 1;
15799
15800 if (tg3_flag(tp, 5750_PLUS)) {
15801 tg3_flag_set(tp, SUPPORT_MSI);
15802 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
15803 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
15804 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
15805 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
15806 tp->pdev_peer == tp->pdev))
15807 tg3_flag_clear(tp, SUPPORT_MSI);
15808
15809 if (tg3_flag(tp, 5755_PLUS) ||
15810 tg3_asic_rev(tp) == ASIC_REV_5906) {
15811 tg3_flag_set(tp, 1SHOT_MSI);
15812 }
15813
15814 if (tg3_flag(tp, 57765_PLUS)) {
15815 tg3_flag_set(tp, SUPPORT_MSIX);
15816 tp->irq_max = TG3_IRQ_MAX_VECS;
15817 }
15818 }
15819
15820 tp->txq_max = 1;
15821 tp->rxq_max = 1;
15822 if (tp->irq_max > 1) {
15823 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15824 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15825
15826 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15827 tg3_asic_rev(tp) == ASIC_REV_5720)
15828 tp->txq_max = tp->irq_max - 1;
15829 }
15830
15831 if (tg3_flag(tp, 5755_PLUS) ||
15832 tg3_asic_rev(tp) == ASIC_REV_5906)
15833 tg3_flag_set(tp, SHORT_DMA_BUG);
15834
15835 if (tg3_asic_rev(tp) == ASIC_REV_5719)
15836 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15837
15838 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15839 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15840 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15841 tg3_asic_rev(tp) == ASIC_REV_5762)
15842 tg3_flag_set(tp, LRG_PROD_RING_CAP);
15843
15844 if (tg3_flag(tp, 57765_PLUS) &&
15845 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
15846 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15847
15848 if (!tg3_flag(tp, 5705_PLUS) ||
15849 tg3_flag(tp, 5780_CLASS) ||
15850 tg3_flag(tp, USE_JUMBO_BDFLAG))
15851 tg3_flag_set(tp, JUMBO_CAPABLE);
15852
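/* Classify the host bus (native PCIe, the PCIe-like 5785, PCI-X, or
 * conventional PCI); several of the workarounds below key off this.
 */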
15853 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15854 &pci_state_reg);
15855
15856 if (pci_is_pcie(tp->pdev)) {
15857 u16 lnkctl;
15858
15859 tg3_flag_set(tp, PCI_EXPRESS);
15860
15861 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
15862 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
15863 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15864 tg3_flag_clear(tp, HW_TSO_2);
15865 tg3_flag_clear(tp, TSO_CAPABLE);
15866 }
15867 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
15868 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15869 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
15870 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
15871 tg3_flag_set(tp, CLKREQ_BUG);
15872 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
15873 tg3_flag_set(tp, L1PLLPD_EN);
15874 }
15875 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
15876 /* BCM5785 devices are effectively PCIe devices, and should
15877 * follow PCIe codepaths, but do not have a PCIe capabilities
15878 * section.
15879 */
15880 tg3_flag_set(tp, PCI_EXPRESS);
15881 } else if (!tg3_flag(tp, 5705_PLUS) ||
15882 tg3_flag(tp, 5780_CLASS)) {
15883 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15884 if (!tp->pcix_cap) {
15885 dev_err(&tp->pdev->dev,
15886 "Cannot find PCI-X capability, aborting\n");
15887 return -EIO;
15888 }
15889
15890 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
15891 tg3_flag_set(tp, PCIX_MODE);
15892 }
15893
15894 /* If we have an AMD 762 or VIA K8T800 chipset, write
15895 * reordering to the mailbox registers done by the host
15896 * controller can cause major trouble. We read back from
15897 * every mailbox register write to force the writes to be
15898 * posted to the chip in order.
15899 */
15900 if (pci_dev_present(tg3_write_reorder_chipsets) &&
15901 !tg3_flag(tp, PCI_EXPRESS))
15902 tg3_flag_set(tp, MBOX_WRITE_REORDER);
15903
15904 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15905 &tp->pci_cacheline_sz);
15906 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15907 &tp->pci_lat_timer);
15908 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
15909 tp->pci_lat_timer < 64) {
15910 tp->pci_lat_timer = 64;
15911 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15912 tp->pci_lat_timer);
15913 }
15914
15915 /* Important! -- It is critical that the PCI-X hw workaround
15916 * situation is decided before the first MMIO register access.
15917 */
15918 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
15919 /* 5700 BX chips need to have their TX producer index
15920 * mailboxes written twice to workaround a bug.
15921 */
15922 tg3_flag_set(tp, TXD_MBOX_HWBUG);
15923
15924 /* If we are in PCI-X mode, enable register write workaround.
15925 *
15926 * The workaround is to use indirect register accesses
15927 * for all chip writes not to mailbox registers.
15928 */
15929 if (tg3_flag(tp, PCIX_MODE)) {
15930 u32 pm_reg;
15931
15932 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15933
15934 /* The chip can have its power management PCI config
15935 * space registers clobbered due to this bug.
15936 * So explicitly force the chip into D0 here.
15937 */
15938 pci_read_config_dword(tp->pdev,
15939 tp->pm_cap + PCI_PM_CTRL,
15940 &pm_reg);
15941 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
15942 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
15943 pci_write_config_dword(tp->pdev,
15944 tp->pm_cap + PCI_PM_CTRL,
15945 pm_reg);
15946
15947 /* Also, force SERR#/PERR# in PCI command. */
15948 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15949 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
15950 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15951 }
15952 }
15953
15954 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
15955 tg3_flag_set(tp, PCI_HIGH_SPEED);
15956 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
15957 tg3_flag_set(tp, PCI_32BIT);
15958
15959 /* Chip-specific fixup from Broadcom driver */
15960 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
15961 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
15962 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
15963 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
15964 }
15965
15966 /* Default fast path register access methods */
15967 tp->read32 = tg3_read32;
15968 tp->write32 = tg3_write32;
15969 tp->read32_mbox = tg3_read32;
15970 tp->write32_mbox = tg3_write32;
15971 tp->write32_tx_mbox = tg3_write32;
15972 tp->write32_rx_mbox = tg3_write32;
15973
15974 /* Various workaround register access methods */
15975 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
15976 tp->write32 = tg3_write_indirect_reg32;
15977 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
15978 (tg3_flag(tp, PCI_EXPRESS) &&
15979 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
15980 /*
15981 * Back-to-back register writes can cause problems on these
15982 * chips; the workaround is to read back all reg writes
15983 * except those to mailbox regs.
15984 *
15985 * See tg3_write_indirect_reg32().
15986 */
15987 tp->write32 = tg3_write_flush_reg32;
15988 }
15989
15990 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
15991 tp->write32_tx_mbox = tg3_write32_tx_mbox;
15992 if (tg3_flag(tp, MBOX_WRITE_REORDER))
15993 tp->write32_rx_mbox = tg3_write_flush_reg32;
15994 }
15995
15996 if (tg3_flag(tp, ICH_WORKAROUND)) {
15997 tp->read32 = tg3_read_indirect_reg32;
15998 tp->write32 = tg3_write_indirect_reg32;
15999 tp->read32_mbox = tg3_read_indirect_mbox;
16000 tp->write32_mbox = tg3_write_indirect_mbox;
16001 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16002 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16003
16004 iounmap(tp->regs);
16005 tp->regs = NULL;
16006
16007 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16008 pci_cmd &= ~PCI_COMMAND_MEMORY;
16009 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16010 }
16011 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16012 tp->read32_mbox = tg3_read32_mbox_5906;
16013 tp->write32_mbox = tg3_write32_mbox_5906;
16014 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16015 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16016 }
16017
16018 if (tp->write32 == tg3_write_indirect_reg32 ||
16019 (tg3_flag(tp, PCIX_MODE) &&
16020 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16021 tg3_asic_rev(tp) == ASIC_REV_5701)))
16022 tg3_flag_set(tp, SRAM_USE_CONFIG);
16023
16024 /* The memory arbiter has to be enabled in order for SRAM accesses
16025 * to succeed. Normally on powerup the tg3 chip firmware will make
16026 * sure it is enabled, but other entities such as system netboot
16027 * code might disable it.
16028 */
16029 val = tr32(MEMARB_MODE);
16030 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16031
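/* Work out which port of a multi-port device this is. On 5704/5780-class
 * boards in PCI-X mode the function number comes from PCI_X_STATUS, and
 * on 5717/5719/5720 from the CPMU status register, presumably because
 * devfn alone is not reliable on those parts.
 */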
16032 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16033 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16034 tg3_flag(tp, 5780_CLASS)) {
16035 if (tg3_flag(tp, PCIX_MODE)) {
16036 pci_read_config_dword(tp->pdev,
16037 tp->pcix_cap + PCI_X_STATUS,
16038 &val);
16039 tp->pci_fn = val & 0x7;
16040 }
16041 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16042 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16043 tg3_asic_rev(tp) == ASIC_REV_5720) {
16044 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16045 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16046 val = tr32(TG3_CPMU_STATUS);
16047
16048 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16049 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16050 else
16051 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16052 TG3_CPMU_STATUS_FSHFT_5719;
16053 }
16054
16055 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16056 tp->write32_tx_mbox = tg3_write_flush_reg32;
16057 tp->write32_rx_mbox = tg3_write_flush_reg32;
16058 }
16059
16060 /* Get eeprom hw config before calling tg3_set_power_state().
16061 * In particular, the TG3_FLAG_IS_NIC flag must be
16062 * determined before calling tg3_set_power_state() so that
16063 * we know whether or not to switch out of Vaux power.
16064 * When the flag is set, it means that GPIO1 is used for eeprom
16065 * write protect and also implies that it is a LOM where GPIOs
16066 * are not used to switch power.
16067 */
16068 tg3_get_eeprom_hw_cfg(tp);
16069
16070 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16071 tg3_flag_clear(tp, TSO_CAPABLE);
16072 tg3_flag_clear(tp, TSO_BUG);
16073 tp->fw_needed = NULL;
16074 }
16075
16076 if (tg3_flag(tp, ENABLE_APE)) {
16077 /* Allow reads and writes to the
16078 * APE register and memory space.
16079 */
16080 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16081 PCISTATE_ALLOW_APE_SHMEM_WR |
16082 PCISTATE_ALLOW_APE_PSPACE_WR;
16083 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16084 pci_state_reg);
16085
16086 tg3_ape_lock_init(tp);
16087 }
16088
16089 /* Set up tp->grc_local_ctrl before calling
16090 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16091 * will bring 5700's external PHY out of reset.
16092 * It is also used as eeprom write protect on LOMs.
16093 */
16094 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16095 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16096 tg3_flag(tp, EEPROM_WRITE_PROT))
16097 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16098 GRC_LCLCTRL_GPIO_OUTPUT1);
16099 /* Unused GPIO3 must be driven as output on 5752 because there
16100 * are no pull-up resistors on unused GPIO pins.
16101 */
16102 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16103 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16104
16105 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16106 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16107 tg3_flag(tp, 57765_CLASS))
16108 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16109
16110 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16111 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16112 /* Turn off the debug UART. */
16113 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16114 if (tg3_flag(tp, IS_NIC))
16115 /* Keep VMain power. */
16116 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16117 GRC_LCLCTRL_GPIO_OUTPUT0;
16118 }
16119
16120 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16121 tp->grc_local_ctrl |=
16122 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16123
16124 /* Switch out of Vaux if it is a NIC */
16125 tg3_pwrsrc_switch_to_vmain(tp);
16126
16127 /* Derive initial jumbo mode from MTU assigned in
16128 * ether_setup() via the alloc_etherdev() call
16129 */
16130 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16131 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16132
16133 /* Determine WakeOnLan speed to use. */
16134 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16135 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16136 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16137 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16138 tg3_flag_clear(tp, WOL_SPEED_100MB);
16139 } else {
16140 tg3_flag_set(tp, WOL_SPEED_100MB);
16141 }
16142
16143 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16144 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16145
16146 /* A few boards don't want the Ethernet@WireSpeed phy feature */
16147 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16148 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16149 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16150 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16151 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16152 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16153 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16154
16155 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16156 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16157 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16158 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16159 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16160
16161 if (tg3_flag(tp, 5705_PLUS) &&
16162 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16163 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16164 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16165 !tg3_flag(tp, 57765_PLUS)) {
16166 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16167 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16168 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16169 tg3_asic_rev(tp) == ASIC_REV_5761) {
16170 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16171 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16172 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16173 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16174 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16175 } else
16176 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16177 }
16178
16179 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16180 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16181 tp->phy_otp = tg3_read_otp_phycfg(tp);
16182 if (tp->phy_otp == 0)
16183 tp->phy_otp = TG3_OTP_DEFAULT;
16184 }
16185
16186 if (tg3_flag(tp, CPMU_PRESENT))
16187 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16188 else
16189 tp->mi_mode = MAC_MI_MODE_BASE;
16190
16191 tp->coalesce_mode = 0;
16192 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16193 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16194 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16195
16196 /* Set these bits to enable statistics workaround. */
16197 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16198 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16199 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16200 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16201 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16202 }
16203
16204 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16205 tg3_asic_rev(tp) == ASIC_REV_57780)
16206 tg3_flag_set(tp, USE_PHYLIB);
16207
16208 err = tg3_mdio_init(tp);
16209 if (err)
16210 return err;
16211
16212 /* Initialize data/descriptor byte/word swapping. */
16213 val = tr32(GRC_MODE);
16214 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16215 tg3_asic_rev(tp) == ASIC_REV_5762)
16216 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16217 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16218 GRC_MODE_B2HRX_ENABLE |
16219 GRC_MODE_HTX2B_ENABLE |
16220 GRC_MODE_HOST_STACKUP);
16221 else
16222 val &= GRC_MODE_HOST_STACKUP;
16223
16224 tw32(GRC_MODE, val | tp->grc_mode);
16225
16226 tg3_switch_clocks(tp);
16227
16228 /* Clear this out for sanity. */
16229 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16230
16231 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16232 &pci_state_reg);
16233 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16234 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16235 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16236 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16237 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16238 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16239 void __iomem *sram_base;
16240
16241 /* Write some dummy words into the SRAM status block
16242 * area and see if they read back correctly. If the return
16243 * value is bad, force enable the PCIX workaround.
16244 */
16245 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16246
16247 writel(0x00000000, sram_base);
16248 writel(0x00000000, sram_base + 4);
16249 writel(0xffffffff, sram_base + 4);
16250 if (readl(sram_base) != 0x00000000)
16251 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16252 }
16253 }
16254
16255 udelay(50);
16256 tg3_nvram_init(tp);
16257
16258 /* If the device has an NVRAM, no need to load patch firmware */
16259 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16260 !tg3_flag(tp, NO_NVRAM))
16261 tp->fw_needed = NULL;
16262
16263 grc_misc_cfg = tr32(GRC_MISC_CFG);
16264 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16265
16266 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16267 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16268 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16269 tg3_flag_set(tp, IS_5788);
16270
16271 if (!tg3_flag(tp, IS_5788) &&
16272 tg3_asic_rev(tp) != ASIC_REV_5700)
16273 tg3_flag_set(tp, TAGGED_STATUS);
16274 if (tg3_flag(tp, TAGGED_STATUS)) {
16275 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16276 HOSTCC_MODE_CLRTICK_TXBD);
16277
16278 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16279 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16280 tp->misc_host_ctrl);
16281 }
16282
16283 /* Preserve the APE MAC_MODE bits */
16284 if (tg3_flag(tp, ENABLE_APE))
16285 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16286 else
16287 tp->mac_mode = 0;
16288
16289 if (tg3_10_100_only_device(tp, ent))
16290 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16291
16292 err = tg3_phy_probe(tp);
16293 if (err) {
16294 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16295 /* ... but do not return immediately ... */
16296 tg3_mdio_fini(tp);
16297 }
16298
16299 tg3_read_vpd(tp);
16300 tg3_read_fw_ver(tp);
16301
16302 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16303 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16304 } else {
16305 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16306 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16307 else
16308 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16309 }
16310
16311 /* 5700 {AX,BX} chips have a broken status block link
16312 * change bit implementation, so we must use the
16313 * status register in those cases.
16314 */
16315 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16316 tg3_flag_set(tp, USE_LINKCHG_REG);
16317 else
16318 tg3_flag_clear(tp, USE_LINKCHG_REG);
16319
16320 /* The led_ctrl is set during tg3_phy_probe; here we might
16321 * have to force the link status polling mechanism based
16322 * upon subsystem IDs.
16323 */
16324 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16325 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16326 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16327 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16328 tg3_flag_set(tp, USE_LINKCHG_REG);
16329 }
16330
16331 /* For all SERDES we poll the MAC status register. */
16332 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16333 tg3_flag_set(tp, POLL_SERDES);
16334 else
16335 tg3_flag_clear(tp, POLL_SERDES);
16336
16337 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16338 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16339 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16340 tg3_flag(tp, PCIX_MODE)) {
16341 tp->rx_offset = NET_SKB_PAD;
16342 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16343 tp->rx_copy_thresh = ~(u16)0;
16344 #endif
16345 }
16346
16347 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16348 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16349 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16350
16351 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16352
16353 /* Increment the rx prod index on the rx std ring by at most
16354 * 8 for these chips to work around hw errata.
16355 */
16356 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16357 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16358 tg3_asic_rev(tp) == ASIC_REV_5755)
16359 tp->rx_std_max_post = 8;
16360
16361 if (tg3_flag(tp, ASPM_WORKAROUND))
16362 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16363 PCIE_PWR_MGMT_L1_THRESH_MSK;
16364
16365 return err;
16366 }
16367
16368 #ifdef CONFIG_SPARC
16369 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16370 {
16371 struct net_device *dev = tp->dev;
16372 struct pci_dev *pdev = tp->pdev;
16373 struct device_node *dp = pci_device_to_OF_node(pdev);
16374 const unsigned char *addr;
16375 int len;
16376
16377 addr = of_get_property(dp, "local-mac-address", &len);
16378 if (addr && len == 6) {
16379 memcpy(dev->dev_addr, addr, 6);
16380 return 0;
16381 }
16382 return -ENODEV;
16383 }
16384
16385 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16386 {
16387 struct net_device *dev = tp->dev;
16388
16389 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
16390 return 0;
16391 }
16392 #endif
16393
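/* Recover the permanent MAC address, trying in turn: the OpenFirmware
 * property (SPARC), the SSB host interface, the SRAM mailbox left by
 * bootcode, NVRAM, and finally the live MAC address registers.
 */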
16394 static int tg3_get_device_address(struct tg3 *tp)
16395 {
16396 struct net_device *dev = tp->dev;
16397 u32 hi, lo, mac_offset;
16398 int addr_ok = 0;
16399 int err;
16400
16401 #ifdef CONFIG_SPARC
16402 if (!tg3_get_macaddr_sparc(tp))
16403 return 0;
16404 #endif
16405
16406 if (tg3_flag(tp, IS_SSB_CORE)) {
16407 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16408 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16409 return 0;
16410 }
16411
16412 mac_offset = 0x7c;
16413 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16414 tg3_flag(tp, 5780_CLASS)) {
16415 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16416 mac_offset = 0xcc;
16417 if (tg3_nvram_lock(tp))
16418 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16419 else
16420 tg3_nvram_unlock(tp);
16421 } else if (tg3_flag(tp, 5717_PLUS)) {
16422 if (tp->pci_fn & 1)
16423 mac_offset = 0xcc;
16424 if (tp->pci_fn > 1)
16425 mac_offset += 0x18c;
16426 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16427 mac_offset = 0x10;
16428
16429 /* First try to get it from MAC address mailbox. */
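	/* The upper 16 bits reading 0x484b ("HK" in ASCII) indicate
	 * bootcode has stored a valid address there.
	 */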
16430 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
16431 if ((hi >> 16) == 0x484b) {
16432 dev->dev_addr[0] = (hi >> 8) & 0xff;
16433 dev->dev_addr[1] = (hi >> 0) & 0xff;
16434
16435 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16436 dev->dev_addr[2] = (lo >> 24) & 0xff;
16437 dev->dev_addr[3] = (lo >> 16) & 0xff;
16438 dev->dev_addr[4] = (lo >> 8) & 0xff;
16439 dev->dev_addr[5] = (lo >> 0) & 0xff;
16440
16441 /* Some old bootcode may report a 0 MAC address in SRAM */
16442 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16443 }
16444 if (!addr_ok) {
16445 /* Next, try NVRAM. */
16446 if (!tg3_flag(tp, NO_NVRAM) &&
16447 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16448 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16449 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16450 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16451 }
16452 /* Finally just fetch it out of the MAC control regs. */
16453 else {
16454 hi = tr32(MAC_ADDR_0_HIGH);
16455 lo = tr32(MAC_ADDR_0_LOW);
16456
16457 dev->dev_addr[5] = lo & 0xff;
16458 dev->dev_addr[4] = (lo >> 8) & 0xff;
16459 dev->dev_addr[3] = (lo >> 16) & 0xff;
16460 dev->dev_addr[2] = (lo >> 24) & 0xff;
16461 dev->dev_addr[1] = hi & 0xff;
16462 dev->dev_addr[0] = (hi >> 8) & 0xff;
16463 }
16464 }
16465
16466 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
16467 #ifdef CONFIG_SPARC
16468 if (!tg3_get_default_macaddr_sparc(tp))
16469 return 0;
16470 #endif
16471 return -EINVAL;
16472 }
16473 return 0;
16474 }
16475
16476 #define BOUNDARY_SINGLE_CACHELINE 1
16477 #define BOUNDARY_MULTI_CACHELINE 2
16478
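/* Compute the DMA read/write boundary bits for DMA_RWCTRL from the host
 * cacheline size and bus type. Only 5700/5701 (and, for writes, PCIe
 * parts) honor these bits; see the comments below.
 */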
16479 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16480 {
16481 int cacheline_size;
16482 u8 byte;
16483 int goal;
16484
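	/* PCI_CACHE_LINE_SIZE is in units of 32-bit words; zero means the
	 * boot firmware never programmed it, so assume (conservatively,
	 * one presumes) the 1024-byte maximum.
	 */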
16485 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16486 if (byte == 0)
16487 cacheline_size = 1024;
16488 else
16489 cacheline_size = (int) byte * 4;
16490
16491 /* On 5703 and later chips, the boundary bits have no
16492 * effect.
16493 */
16494 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16495 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16496 !tg3_flag(tp, PCI_EXPRESS))
16497 goto out;
16498
16499 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16500 goal = BOUNDARY_MULTI_CACHELINE;
16501 #else
16502 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16503 goal = BOUNDARY_SINGLE_CACHELINE;
16504 #else
16505 goal = 0;
16506 #endif
16507 #endif
16508
16509 if (tg3_flag(tp, 57765_PLUS)) {
16510 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16511 goto out;
16512 }
16513
16514 if (!goal)
16515 goto out;
16516
16517 /* PCI controllers on most RISC systems tend to disconnect
16518 * when a device tries to burst across a cache-line boundary.
16519 * Therefore, letting tg3 do so just wastes PCI bandwidth.
16520 *
16521 * Unfortunately, for PCI-E there are only limited
16522 * write-side controls for this, and thus for reads
16523 * we will still get the disconnects. We'll also waste
16524 * these PCI cycles for both read and write for chips
16525 * other than 5700 and 5701 which do not implement the
16526 * boundary bits.
16527 */
16528 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16529 switch (cacheline_size) {
16530 case 16:
16531 case 32:
16532 case 64:
16533 case 128:
16534 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16535 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16536 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16537 } else {
16538 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16539 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16540 }
16541 break;
16542
16543 case 256:
16544 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16545 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16546 break;
16547
16548 default:
16549 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16550 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16551 break;
16552 }
16553 } else if (tg3_flag(tp, PCI_EXPRESS)) {
16554 switch (cacheline_size) {
16555 case 16:
16556 case 32:
16557 case 64:
16558 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16559 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16560 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16561 break;
16562 }
16563 /* fallthrough */
16564 case 128:
16565 default:
16566 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16567 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16568 break;
16569 }
16570 } else {
16571 switch (cacheline_size) {
16572 case 16:
16573 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16574 val |= (DMA_RWCTRL_READ_BNDRY_16 |
16575 DMA_RWCTRL_WRITE_BNDRY_16);
16576 break;
16577 }
16578 /* fallthrough */
16579 case 32:
16580 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16581 val |= (DMA_RWCTRL_READ_BNDRY_32 |
16582 DMA_RWCTRL_WRITE_BNDRY_32);
16583 break;
16584 }
16585 /* fallthrough */
16586 case 64:
16587 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16588 val |= (DMA_RWCTRL_READ_BNDRY_64 |
16589 DMA_RWCTRL_WRITE_BNDRY_64);
16590 break;
16591 }
16592 /* fallthrough */
16593 case 128:
16594 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16595 val |= (DMA_RWCTRL_READ_BNDRY_128 |
16596 DMA_RWCTRL_WRITE_BNDRY_128);
16597 break;
16598 }
16599 /* fallthrough */
16600 case 256:
16601 val |= (DMA_RWCTRL_READ_BNDRY_256 |
16602 DMA_RWCTRL_WRITE_BNDRY_256);
16603 break;
16604 case 512:
16605 val |= (DMA_RWCTRL_READ_BNDRY_512 |
16606 DMA_RWCTRL_WRITE_BNDRY_512);
16607 break;
16608 case 1024:
16609 default:
16610 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16611 DMA_RWCTRL_WRITE_BNDRY_1024);
16612 break;
16613 }
16614 }
16615
16616 out:
16617 return val;
16618 }
16619
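/* Push one buffer through the chip's DMA engine by hand: build a test
 * descriptor in NIC SRAM, kick the read (host-to-device) or write
 * (device-to-host) DMA queue, then poll the completion FIFO for up to
 * roughly 4ms.
 */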
16620 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16621 int size, bool to_device)
16622 {
16623 struct tg3_internal_buffer_desc test_desc;
16624 u32 sram_dma_descs;
16625 int i, ret;
16626
16627 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
16628
16629 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16630 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16631 tw32(RDMAC_STATUS, 0);
16632 tw32(WDMAC_STATUS, 0);
16633
16634 tw32(BUFMGR_MODE, 0);
16635 tw32(FTQ_RESET, 0);
16636
16637 test_desc.addr_hi = ((u64) buf_dma) >> 32;
16638 test_desc.addr_lo = buf_dma & 0xffffffff;
16639 test_desc.nic_mbuf = 0x00002100;
16640 test_desc.len = size;
16641
16642 /*
16643 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
16644 * the *second* time the tg3 driver was getting loaded after an
16645 * initial scan.
16646 *
16647 * Broadcom tells me:
16648 * ...the DMA engine is connected to the GRC block and a DMA
16649 * reset may affect the GRC block in some unpredictable way...
16650 * The behavior of resets to individual blocks has not been tested.
16651 *
16652 * Broadcom noted the GRC reset will also reset all sub-components.
16653 */
16654 if (to_device) {
16655 test_desc.cqid_sqid = (13 << 8) | 2;
16656
16657 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
16658 udelay(40);
16659 } else {
16660 test_desc.cqid_sqid = (16 << 8) | 7;
16661
16662 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
16663 udelay(40);
16664 }
16665 test_desc.flags = 0x00000005;
16666
16667 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
16668 u32 val;
16669
16670 val = *(((u32 *)&test_desc) + i);
16671 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
16672 sram_dma_descs + (i * sizeof(u32)));
16673 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
16674 }
16675 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
16676
16677 if (to_device)
16678 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
16679 else
16680 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
16681
16682 ret = -ENODEV;
16683 for (i = 0; i < 40; i++) {
16684 u32 val;
16685
16686 if (to_device)
16687 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
16688 else
16689 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
16690 if ((val & 0xffff) == sram_dma_descs) {
16691 ret = 0;
16692 break;
16693 }
16694
16695 udelay(100);
16696 }
16697
16698 return ret;
16699 }
16700
16701 #define TEST_BUFFER_SIZE 0x2000
16702
16703 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16704 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16705 { },
16706 };
16707
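/* Probe safe DMA_RWCTRL settings. A counting pattern is DMAed to the
 * chip and back; if it comes back corrupted, the write boundary is
 * dropped to the conservative 16-byte setting.
 */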
16708 static int tg3_test_dma(struct tg3 *tp)
16709 {
16710 dma_addr_t buf_dma;
16711 u32 *buf, saved_dma_rwctrl;
16712 int ret = 0;
16713
16714 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16715 &buf_dma, GFP_KERNEL);
16716 if (!buf) {
16717 ret = -ENOMEM;
16718 goto out_nofree;
16719 }
16720
16721 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16722 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16723
16724 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
16725
16726 if (tg3_flag(tp, 57765_PLUS))
16727 goto out;
16728
16729 if (tg3_flag(tp, PCI_EXPRESS)) {
16730 /* DMA read watermark not used on PCIE */
16731 tp->dma_rwctrl |= 0x00180000;
16732 } else if (!tg3_flag(tp, PCIX_MODE)) {
16733 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16734 tg3_asic_rev(tp) == ASIC_REV_5750)
16735 tp->dma_rwctrl |= 0x003f0000;
16736 else
16737 tp->dma_rwctrl |= 0x003f000f;
16738 } else {
16739 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16740 tg3_asic_rev(tp) == ASIC_REV_5704) {
16741 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16742 u32 read_water = 0x7;
16743
16744 /* If the 5704 is behind the EPB bridge, we can
16745 * do the less restrictive ONE_DMA workaround for
16746 * better performance.
16747 */
16748 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16749 tg3_asic_rev(tp) == ASIC_REV_5704)
16750 tp->dma_rwctrl |= 0x8000;
16751 else if (ccval == 0x6 || ccval == 0x7)
16752 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16753
16754 if (tg3_asic_rev(tp) == ASIC_REV_5703)
16755 read_water = 4;
16756 /* Set bit 23 to enable PCIX hw bug fix */
16757 tp->dma_rwctrl |=
16758 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
16759 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
16760 (1 << 23);
16761 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
16762 /* 5780 always in PCIX mode */
16763 tp->dma_rwctrl |= 0x00144000;
16764 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
16765 /* 5714 always in PCIX mode */
16766 tp->dma_rwctrl |= 0x00148000;
16767 } else {
16768 tp->dma_rwctrl |= 0x001b000f;
16769 }
16770 }
16771 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
16772 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16773
16774 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16775 tg3_asic_rev(tp) == ASIC_REV_5704)
16776 tp->dma_rwctrl &= 0xfffffff0;
16777
16778 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16779 tg3_asic_rev(tp) == ASIC_REV_5701) {
16780 /* Remove this if it causes problems for some boards. */
16781 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
16782
16783 /* On 5700/5701 chips, we need to set this bit.
16784 * Otherwise the chip will issue cacheline transactions
16785 * to streamable DMA memory without all the byte
16786 * enables turned on. This is an error on several
16787 * RISC PCI controllers, in particular sparc64.
16788 *
16789 * On 5703/5704 chips, this bit has been reassigned
16790 * a different meaning. In particular, it is used
16791 * on those chips to enable a PCI-X workaround.
16792 */
16793 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
16794 }
16795
16796 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16797
16798 #if 0
16799 /* Unneeded, already done by tg3_get_invariants. */
16800 tg3_switch_clocks(tp);
16801 #endif
16802
16803 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16804 tg3_asic_rev(tp) != ASIC_REV_5701)
16805 goto out;
16806
16807 /* It is best to perform the DMA test with maximum write burst size
16808 * to expose the 5700/5701 write DMA bug.
16809 */
16810 saved_dma_rwctrl = tp->dma_rwctrl;
16811 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16812 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16813
16814 while (1) {
16815 u32 *p = buf, i;
16816
16817 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
16818 p[i] = i;
16819
16820 /* Send the buffer to the chip. */
16821 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
16822 if (ret) {
16823 dev_err(&tp->pdev->dev,
16824 "%s: Buffer write failed. err = %d\n",
16825 __func__, ret);
16826 break;
16827 }
16828
16829 #if 0
16830 /* Validate that the data reached card RAM correctly. */
16831 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16832 u32 val;
16833 tg3_read_mem(tp, 0x2100 + (i*4), &val);
16834 if (le32_to_cpu(val) != p[i]) {
16835 dev_err(&tp->pdev->dev,
16836 "%s: Buffer corrupted on device! "
16837 "(%d != %d)\n", __func__, val, i);
16838 /* ret = -ENODEV here? */
16839 }
16840 p[i] = 0;
16841 }
16842 #endif
16843 /* Now read it back. */
16844 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
16845 if (ret) {
16846 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
16847 "err = %d\n", __func__, ret);
16848 break;
16849 }
16850
16851 /* Verify it. */
16852 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16853 if (p[i] == i)
16854 continue;
16855
16856 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16857 DMA_RWCTRL_WRITE_BNDRY_16) {
16858 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16859 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16860 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16861 break;
16862 } else {
16863 dev_err(&tp->pdev->dev,
16864 "%s: Buffer corrupted on read back! "
16865 "(%d != %d)\n", __func__, p[i], i);
16866 ret = -ENODEV;
16867 goto out;
16868 }
16869 }
16870
16871 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
16872 /* Success. */
16873 ret = 0;
16874 break;
16875 }
16876 }
16877 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16878 DMA_RWCTRL_WRITE_BNDRY_16) {
16879 /* DMA test passed without adjusting the DMA boundary;
16880 * now look for chipsets that are known to expose the
16881 * DMA bug without failing the test.
16882 */
16883 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
16884 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16885 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16886 } else {
16887 /* Safe to use the calculated DMA boundary. */
16888 tp->dma_rwctrl = saved_dma_rwctrl;
16889 }
16890
16891 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16892 }
16893
16894 out:
16895 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
16896 out_nofree:
16897 return ret;
16898 }
16899
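/* Pick MAC buffer-manager watermarks (standard and jumbo frame)
 * appropriate to the chip generation.
 */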
16900 static void tg3_init_bufmgr_config(struct tg3 *tp)
16901 {
16902 if (tg3_flag(tp, 57765_PLUS)) {
16903 tp->bufmgr_config.mbuf_read_dma_low_water =
16904 DEFAULT_MB_RDMA_LOW_WATER_5705;
16905 tp->bufmgr_config.mbuf_mac_rx_low_water =
16906 DEFAULT_MB_MACRX_LOW_WATER_57765;
16907 tp->bufmgr_config.mbuf_high_water =
16908 DEFAULT_MB_HIGH_WATER_57765;
16909
16910 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16911 DEFAULT_MB_RDMA_LOW_WATER_5705;
16912 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16913 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
16914 tp->bufmgr_config.mbuf_high_water_jumbo =
16915 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
16916 } else if (tg3_flag(tp, 5705_PLUS)) {
16917 tp->bufmgr_config.mbuf_read_dma_low_water =
16918 DEFAULT_MB_RDMA_LOW_WATER_5705;
16919 tp->bufmgr_config.mbuf_mac_rx_low_water =
16920 DEFAULT_MB_MACRX_LOW_WATER_5705;
16921 tp->bufmgr_config.mbuf_high_water =
16922 DEFAULT_MB_HIGH_WATER_5705;
16923 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16924 tp->bufmgr_config.mbuf_mac_rx_low_water =
16925 DEFAULT_MB_MACRX_LOW_WATER_5906;
16926 tp->bufmgr_config.mbuf_high_water =
16927 DEFAULT_MB_HIGH_WATER_5906;
16928 }
16929
16930 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16931 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
16932 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16933 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
16934 tp->bufmgr_config.mbuf_high_water_jumbo =
16935 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
16936 } else {
16937 tp->bufmgr_config.mbuf_read_dma_low_water =
16938 DEFAULT_MB_RDMA_LOW_WATER;
16939 tp->bufmgr_config.mbuf_mac_rx_low_water =
16940 DEFAULT_MB_MACRX_LOW_WATER;
16941 tp->bufmgr_config.mbuf_high_water =
16942 DEFAULT_MB_HIGH_WATER;
16943
16944 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16945 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
16946 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16947 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
16948 tp->bufmgr_config.mbuf_high_water_jumbo =
16949 DEFAULT_MB_HIGH_WATER_JUMBO;
16950 }
16951
16952 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
16953 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
16954 }
16955
16956 static char *tg3_phy_string(struct tg3 *tp)
16957 {
16958 switch (tp->phy_id & TG3_PHY_ID_MASK) {
16959 case TG3_PHY_ID_BCM5400: return "5400";
16960 case TG3_PHY_ID_BCM5401: return "5401";
16961 case TG3_PHY_ID_BCM5411: return "5411";
16962 case TG3_PHY_ID_BCM5701: return "5701";
16963 case TG3_PHY_ID_BCM5703: return "5703";
16964 case TG3_PHY_ID_BCM5704: return "5704";
16965 case TG3_PHY_ID_BCM5705: return "5705";
16966 case TG3_PHY_ID_BCM5750: return "5750";
16967 case TG3_PHY_ID_BCM5752: return "5752";
16968 case TG3_PHY_ID_BCM5714: return "5714";
16969 case TG3_PHY_ID_BCM5780: return "5780";
16970 case TG3_PHY_ID_BCM5755: return "5755";
16971 case TG3_PHY_ID_BCM5787: return "5787";
16972 case TG3_PHY_ID_BCM5784: return "5784";
16973 case TG3_PHY_ID_BCM5756: return "5722/5756";
16974 case TG3_PHY_ID_BCM5906: return "5906";
16975 case TG3_PHY_ID_BCM5761: return "5761";
16976 case TG3_PHY_ID_BCM5718C: return "5718C";
16977 case TG3_PHY_ID_BCM5718S: return "5718S";
16978 case TG3_PHY_ID_BCM57765: return "57765";
16979 case TG3_PHY_ID_BCM5719C: return "5719C";
16980 case TG3_PHY_ID_BCM5720C: return "5720C";
16981 case TG3_PHY_ID_BCM5762: return "5762C";
16982 case TG3_PHY_ID_BCM8002: return "8002/serdes";
16983 case 0: return "serdes";
16984 default: return "unknown";
16985 }
16986 }
16987
16988 static char *tg3_bus_string(struct tg3 *tp, char *str)
16989 {
16990 if (tg3_flag(tp, PCI_EXPRESS)) {
16991 strcpy(str, "PCI Express");
16992 return str;
16993 } else if (tg3_flag(tp, PCIX_MODE)) {
16994 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
16995
16996 strcpy(str, "PCIX:");
16997
16998 if ((clock_ctrl == 7) ||
16999 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17000 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17001 strcat(str, "133MHz");
17002 else if (clock_ctrl == 0)
17003 strcat(str, "33MHz");
17004 else if (clock_ctrl == 2)
17005 strcat(str, "50MHz");
17006 else if (clock_ctrl == 4)
17007 strcat(str, "66MHz");
17008 else if (clock_ctrl == 6)
17009 strcat(str, "100MHz");
17010 } else {
17011 strcpy(str, "PCI:");
17012 if (tg3_flag(tp, PCI_HIGH_SPEED))
17013 strcat(str, "66MHz");
17014 else
17015 strcat(str, "33MHz");
17016 }
17017 if (tg3_flag(tp, PCI_32BIT))
17018 strcat(str, ":32-bit");
17019 else
17020 strcat(str, ":64-bit");
17021 return str;
17022 }
17023
17024 static void tg3_init_coal(struct tg3 *tp)
17025 {
17026 struct ethtool_coalesce *ec = &tp->coal;
17027
17028 memset(ec, 0, sizeof(*ec));
17029 ec->cmd = ETHTOOL_GCOALESCE;
17030 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17031 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17032 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17033 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17034 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17035 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17036 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17037 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17038 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17039
17040 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17041 HOSTCC_MODE_CLRTICK_TXBD)) {
17042 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17043 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17044 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17045 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17046 }
17047
17048 if (tg3_flag(tp, 5705_PLUS)) {
17049 ec->rx_coalesce_usecs_irq = 0;
17050 ec->tx_coalesce_usecs_irq = 0;
17051 ec->stats_block_coalesce_usecs = 0;
17052 }
17053 }
17054
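/* PCI probe entry point: map BARs, read chip invariants, size the DMA
 * masks, validate DMA, then register the net device.
 */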
17055 static int tg3_init_one(struct pci_dev *pdev,
17056 const struct pci_device_id *ent)
17057 {
17058 struct net_device *dev;
17059 struct tg3 *tp;
17060 int i, err, pm_cap;
17061 u32 sndmbx, rcvmbx, intmbx;
17062 char str[40];
17063 u64 dma_mask, persist_dma_mask;
17064 netdev_features_t features = 0;
17065
17066 printk_once(KERN_INFO "%s\n", version);
17067
17068 err = pci_enable_device(pdev);
17069 if (err) {
17070 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17071 return err;
17072 }
17073
17074 err = pci_request_regions(pdev, DRV_MODULE_NAME);
17075 if (err) {
17076 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17077 goto err_out_disable_pdev;
17078 }
17079
17080 pci_set_master(pdev);
17081
17082 /* Find power-management capability. */
17083 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
17084 if (pm_cap == 0) {
17085 dev_err(&pdev->dev,
17086 "Cannot find Power Management capability, aborting\n");
17087 err = -EIO;
17088 goto err_out_free_res;
17089 }
17090
17091 err = pci_set_power_state(pdev, PCI_D0);
17092 if (err) {
17093 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
17094 goto err_out_free_res;
17095 }
17096
17097 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17098 if (!dev) {
17099 err = -ENOMEM;
17100 goto err_out_power_down;
17101 }
17102
17103 SET_NETDEV_DEV(dev, &pdev->dev);
17104
17105 tp = netdev_priv(dev);
17106 tp->pdev = pdev;
17107 tp->dev = dev;
17108 tp->pm_cap = pm_cap;
17109 tp->rx_mode = TG3_DEF_RX_MODE;
17110 tp->tx_mode = TG3_DEF_TX_MODE;
17111 tp->irq_sync = 1;
17112
17113 if (tg3_debug > 0)
17114 tp->msg_enable = tg3_debug;
17115 else
17116 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17117
17118 if (pdev_is_ssb_gige_core(pdev)) {
17119 tg3_flag_set(tp, IS_SSB_CORE);
17120 if (ssb_gige_must_flush_posted_writes(pdev))
17121 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17122 if (ssb_gige_one_dma_at_once(pdev))
17123 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17124 if (ssb_gige_have_roboswitch(pdev))
17125 tg3_flag_set(tp, ROBOSWITCH);
17126 if (ssb_gige_is_rgmii(pdev))
17127 tg3_flag_set(tp, RGMII_MODE);
17128 }
17129
17130 /* The word/byte swap controls here control register access byte
17131 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17132 * setting below.
17133 */
17134 tp->misc_host_ctrl =
17135 MISC_HOST_CTRL_MASK_PCI_INT |
17136 MISC_HOST_CTRL_WORD_SWAP |
17137 MISC_HOST_CTRL_INDIR_ACCESS |
17138 MISC_HOST_CTRL_PCISTATE_RW;
17139
17140 /* The NONFRM (non-frame) byte/word swap controls take effect
17141 * on descriptor entries, anything which isn't packet data.
17142 *
17143 * The StrongARM chips on the board (one for tx, one for rx)
17144 * are running in big-endian mode.
17145 */
17146 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17147 GRC_MODE_WSWAP_NONFRM_DATA);
17148 #ifdef __BIG_ENDIAN
17149 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17150 #endif
17151 spin_lock_init(&tp->lock);
17152 spin_lock_init(&tp->indirect_lock);
17153 INIT_WORK(&tp->reset_task, tg3_reset_task);
17154
17155 tp->regs = pci_ioremap_bar(pdev, BAR_0);
17156 if (!tp->regs) {
17157 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17158 err = -ENOMEM;
17159 goto err_out_free_dev;
17160 }
17161
17162 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17163 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17164 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17165 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17166 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17167 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17168 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17169 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17170 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17171 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17172 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17173 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
17174 tg3_flag_set(tp, ENABLE_APE);
17175 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17176 if (!tp->aperegs) {
17177 dev_err(&pdev->dev,
17178 "Cannot map APE registers, aborting\n");
17179 err = -ENOMEM;
17180 goto err_out_iounmap;
17181 }
17182 }
17183
17184 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17185 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17186
17187 dev->ethtool_ops = &tg3_ethtool_ops;
17188 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17189 dev->netdev_ops = &tg3_netdev_ops;
17190 dev->irq = pdev->irq;
17191
17192 err = tg3_get_invariants(tp, ent);
17193 if (err) {
17194 dev_err(&pdev->dev,
17195 "Problem fetching invariants of chip, aborting\n");
17196 goto err_out_apeunmap;
17197 }
17198
17199 /* The EPB bridge inside 5714, 5715, and 5780 and any
17200 * device behind the EPB cannot support DMA addresses > 40-bit.
17201 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17202 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17203 * do DMA address check in tg3_start_xmit().
17204 */
17205 if (tg3_flag(tp, IS_5788))
17206 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17207 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17208 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17209 #ifdef CONFIG_HIGHMEM
17210 dma_mask = DMA_BIT_MASK(64);
17211 #endif
17212 } else
17213 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17214
17215 /* Configure DMA attributes. */
17216 if (dma_mask > DMA_BIT_MASK(32)) {
17217 err = pci_set_dma_mask(pdev, dma_mask);
17218 if (!err) {
17219 features |= NETIF_F_HIGHDMA;
17220 err = pci_set_consistent_dma_mask(pdev,
17221 persist_dma_mask);
17222 if (err < 0) {
17223 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17224 "DMA for consistent allocations\n");
17225 goto err_out_apeunmap;
17226 }
17227 }
17228 }
17229 if (err || dma_mask == DMA_BIT_MASK(32)) {
17230 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17231 if (err) {
17232 dev_err(&pdev->dev,
17233 "No usable DMA configuration, aborting\n");
17234 goto err_out_apeunmap;
17235 }
17236 }
17237
17238 tg3_init_bufmgr_config(tp);
17239
17240 features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
17241
17242 /* 5700 B0 chips do not support checksumming correctly due
17243 * to hardware bugs.
17244 */
17245 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17246 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17247
17248 if (tg3_flag(tp, 5755_PLUS))
17249 features |= NETIF_F_IPV6_CSUM;
17250 }
17251
17252 /* TSO is on by default on chips that support hardware TSO.
17253 * Firmware TSO on older chips gives lower performance, so it
17254 * is off by default, but can be enabled using ethtool.
17255 */
17256 if ((tg3_flag(tp, HW_TSO_1) ||
17257 tg3_flag(tp, HW_TSO_2) ||
17258 tg3_flag(tp, HW_TSO_3)) &&
17259 (features & NETIF_F_IP_CSUM))
17260 features |= NETIF_F_TSO;
17261 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17262 if (features & NETIF_F_IPV6_CSUM)
17263 features |= NETIF_F_TSO6;
17264 if (tg3_flag(tp, HW_TSO_3) ||
17265 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17266 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17267 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17268 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17269 tg3_asic_rev(tp) == ASIC_REV_57780)
17270 features |= NETIF_F_TSO_ECN;
17271 }
17272
17273 dev->features |= features;
17274 dev->vlan_features |= features;
17275
17276 /*
17277 * Add loopback capability only for a subset of devices that support
17278 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17279 * loopback for the remaining devices.
17280 */
17281 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17282 !tg3_flag(tp, CPMU_PRESENT))
17283 /* Add the loopback capability */
17284 features |= NETIF_F_LOOPBACK;
17285
17286 dev->hw_features |= features;
17287
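/* 5705 A1 chips without TSO on a slow PCI bus are limited to 64
 * pending RX descriptors; shrink the default ring accordingly.
 */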
17288 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17289 !tg3_flag(tp, TSO_CAPABLE) &&
17290 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17291 tg3_flag_set(tp, MAX_RXPEND_64);
17292 tp->rx_pending = 63;
17293 }
17294
17295 err = tg3_get_device_address(tp);
17296 if (err) {
17297 dev_err(&pdev->dev,
17298 "Could not obtain valid ethernet address, aborting\n");
17299 goto err_out_apeunmap;
17300 }
17301
17302 /*
17303 * Reset chip in case UNDI or EFI driver did not shut it down.
17304 * The DMA self test will enable WDMAC and we'll see (spurious)
17305 * pending DMA on the PCI bus at that point.
17306 */
17307 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17308 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17309 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17310 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17311 }
17312
17313 err = tg3_test_dma(tp);
17314 if (err) {
17315 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17316 goto err_out_apeunmap;
17317 }
17318
17319 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17320 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17321 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
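/* Give each NAPI context its own interrupt, RX consumer, and TX
 * producer mailbox.  Interrupt mailboxes for the first vectors are
 * 8 bytes apart; later ones are packed 4 bytes apart.
 */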
17322 for (i = 0; i < tp->irq_max; i++) {
17323 struct tg3_napi *tnapi = &tp->napi[i];
17324
17325 tnapi->tp = tp;
17326 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17327
17328 tnapi->int_mbox = intmbx;
17329 if (i <= 4)
17330 intmbx += 0x8;
17331 else
17332 intmbx += 0x4;
17333
17334 tnapi->consmbox = rcvmbx;
17335 tnapi->prodmbox = sndmbx;
17336
17337 if (i)
17338 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17339 else
17340 tnapi->coal_now = HOSTCC_MODE_NOW;
17341
17342 if (!tg3_flag(tp, SUPPORT_MSIX))
17343 break;
17344
17345 /*
17346 * If we support MSIX, we'll be using RSS. If we're using
17347 * RSS, the first vector only handles link interrupts and the
17348 * remaining vectors handle rx and tx interrupts. Reuse the
17349 * mailbox values for the next iteration. The values we set up
17350 * above are still useful for the single-vector mode.
17351 */
17352 if (!i)
17353 continue;
17354
17355 rcvmbx += 0x8;
17356
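/* TX producer mailboxes are interleaved rather than laid out
 * linearly, so alternately step back 4 or forward 12 bytes to
 * reach the next one.
 */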
17357 if (sndmbx & 0x4)
17358 sndmbx -= 0x4;
17359 else
17360 sndmbx += 0xc;
17361 }
17362
17363 tg3_init_coal(tp);
17364
17365 pci_set_drvdata(pdev, dev);
17366
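/* These ASICs carry a hardware PTP clock, so mark the device as
 * capable of PTP timestamping.
 */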
17367 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17368 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17369 tg3_asic_rev(tp) == ASIC_REV_5762)
17370 tg3_flag_set(tp, PTP_CAPABLE);
17371
17372 if (tg3_flag(tp, 5717_PLUS)) {
17373 /* Put the device back into a low-power mode */
17374 tg3_frob_aux_power(tp, false);
17375 }
17376
17377 tg3_timer_init(tp);
17378
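/* Report the carrier as down until the first link event. */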
17379 tg3_carrier_off(tp);
17380
17381 err = register_netdev(dev);
17382 if (err) {
17383 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17384 goto err_out_apeunmap;
17385 }
17386
17387 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17388 tp->board_part_number,
17389 tg3_chip_rev_id(tp),
17390 tg3_bus_string(tp, str),
17391 dev->dev_addr);
17392
17393 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17394 struct phy_device *phydev;
17395 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
17396 netdev_info(dev,
17397 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17398 phydev->drv->name, dev_name(&phydev->dev));
17399 } else {
17400 char *ethtype;
17401
17402 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17403 ethtype = "10/100Base-TX";
17404 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17405 ethtype = "1000Base-SX";
17406 else
17407 ethtype = "10/100/1000Base-T";
17408
17409 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17410 "(WireSpeed[%d], EEE[%d])\n",
17411 tg3_phy_string(tp), ethtype,
17412 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17413 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17414 }
17415
17416 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17417 (dev->features & NETIF_F_RXCSUM) != 0,
17418 tg3_flag(tp, USE_LINKCHG_REG) != 0,
17419 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17420 tg3_flag(tp, ENABLE_ASF) != 0,
17421 tg3_flag(tp, TSO_CAPABLE) != 0);
17422 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17423 tp->dma_rwctrl,
17424 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17425 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17426
17427 pci_save_state(pdev);
17428
17429 return 0;
17430
17431 err_out_apeunmap:
17432 if (tp->aperegs) {
17433 iounmap(tp->aperegs);
17434 tp->aperegs = NULL;
17435 }
17436
17437 err_out_iounmap:
17438 if (tp->regs) {
17439 iounmap(tp->regs);
17440 tp->regs = NULL;
17441 }
17442
17443 err_out_free_dev:
17444 free_netdev(dev);
17445
17446 err_out_power_down:
17447 pci_set_power_state(pdev, PCI_D3hot);
17448
17449 err_out_free_res:
17450 pci_release_regions(pdev);
17451
17452 err_out_disable_pdev:
17453 pci_disable_device(pdev);
17454 pci_set_drvdata(pdev, NULL);
17455 return err;
17456 }
17457
17458 static void tg3_remove_one(struct pci_dev *pdev)
17459 {
17460 struct net_device *dev = pci_get_drvdata(pdev);
17461
17462 if (dev) {
17463 struct tg3 *tp = netdev_priv(dev);
17464
17465 release_firmware(tp->fw);
17466
17467 tg3_reset_task_cancel(tp);
17468
17469 if (tg3_flag(tp, USE_PHYLIB)) {
17470 tg3_phy_fini(tp);
17471 tg3_mdio_fini(tp);
17472 }
17473
17474 unregister_netdev(dev);
17475 if (tp->aperegs) {
17476 iounmap(tp->aperegs);
17477 tp->aperegs = NULL;
17478 }
17479 if (tp->regs) {
17480 iounmap(tp->regs);
17481 tp->regs = NULL;
17482 }
17483 free_netdev(dev);
17484 pci_release_regions(pdev);
17485 pci_disable_device(pdev);
17486 pci_set_drvdata(pdev, NULL);
17487 }
17488 }
17489
17490 #ifdef CONFIG_PM_SLEEP
17491 static int tg3_suspend(struct device *device)
17492 {
17493 struct pci_dev *pdev = to_pci_dev(device);
17494 struct net_device *dev = pci_get_drvdata(pdev);
17495 struct tg3 *tp = netdev_priv(dev);
17496 int err;
17497
17498 if (!netif_running(dev))
17499 return 0;
17500
17501 tg3_reset_task_cancel(tp);
17502 tg3_phy_stop(tp);
17503 tg3_netif_stop(tp);
17504
17505 tg3_timer_stop(tp);
17506
17507 tg3_full_lock(tp, 1);
17508 tg3_disable_ints(tp);
17509 tg3_full_unlock(tp);
17510
17511 netif_device_detach(dev);
17512
17513 tg3_full_lock(tp, 0);
17514 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17515 tg3_flag_clear(tp, INIT_COMPLETE);
17516 tg3_full_unlock(tp);
17517
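/* Prepare the chip for power-down.  On failure, roll the device
 * back to a fully running state rather than leaving it half
 * stopped.
 */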
17518 err = tg3_power_down_prepare(tp);
17519 if (err) {
17520 int err2;
17521
17522 tg3_full_lock(tp, 0);
17523
17524 tg3_flag_set(tp, INIT_COMPLETE);
17525 err2 = tg3_restart_hw(tp, true);
17526 if (err2)
17527 goto out;
17528
17529 tg3_timer_start(tp);
17530
17531 netif_device_attach(dev);
17532 tg3_netif_start(tp);
17533
17534 out:
17535 tg3_full_unlock(tp);
17536
17537 if (!err2)
17538 tg3_phy_start(tp);
17539 }
17540
17541 return err;
17542 }
17543
17544 static int tg3_resume(struct device *device)
17545 {
17546 struct pci_dev *pdev = to_pci_dev(device);
17547 struct net_device *dev = pci_get_drvdata(pdev);
17548 struct tg3 *tp = netdev_priv(dev);
17549 int err;
17550
17551 if (!netif_running(dev))
17552 return 0;
17553
17554 netif_device_attach(dev);
17555
17556 tg3_full_lock(tp, 0);
17557
17558 tg3_flag_set(tp, INIT_COMPLETE);
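/* Reset the PHY during restart unless the link was deliberately
 * kept up across the power-down.
 */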
17559 err = tg3_restart_hw(tp,
17560 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
17561 if (err)
17562 goto out;
17563
17564 tg3_timer_start(tp);
17565
17566 tg3_netif_start(tp);
17567
17568 out:
17569 tg3_full_unlock(tp);
17570
17571 if (!err)
17572 tg3_phy_start(tp);
17573
17574 return err;
17575 }
17576 #endif /* CONFIG_PM_SLEEP */
17577
17578 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17579
17580 /**
17581 * tg3_io_error_detected - called when PCI error is detected
17582 * @pdev: Pointer to PCI device
17583 * @state: The current PCI connection state
17584 *
17585 * This function is called after a PCI bus error affecting
17586 * this device has been detected.
17587 */
17588 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17589 pci_channel_state_t state)
17590 {
17591 struct net_device *netdev = pci_get_drvdata(pdev);
17592 struct tg3 *tp = netdev_priv(netdev);
17593 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17594
17595 netdev_info(netdev, "PCI I/O error detected\n");
17596
17597 rtnl_lock();
17598
17599 if (!netif_running(netdev))
17600 goto done;
17601
17602 tg3_phy_stop(tp);
17603
17604 tg3_netif_stop(tp);
17605
17606 tg3_timer_stop(tp);
17607
17608 /* Make sure that the reset task doesn't run */
17609 tg3_reset_task_cancel(tp);
17610
17611 netif_device_detach(netdev);
17612
17613 /* Clean up software state, even if MMIO is blocked */
17614 tg3_full_lock(tp, 0);
17615 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17616 tg3_full_unlock(tp);
17617
17618 done:
17619 if (state == pci_channel_io_perm_failure)
17620 err = PCI_ERS_RESULT_DISCONNECT;
17621 else
17622 pci_disable_device(pdev);
17623
17624 rtnl_unlock();
17625
17626 return err;
17627 }
17628
17629 /**
17630 * tg3_io_slot_reset - called after the PCI bus has been reset.
17631 * @pdev: Pointer to PCI device
17632 *
17633 * Restart the card from scratch, as if from a cold boot.
17634 * At this point, the card has experienced a hard reset,
17635 * followed by fixups by BIOS, and has its config space
17636 * set up identically to what it was at cold boot.
17637 */
17638 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17639 {
17640 struct net_device *netdev = pci_get_drvdata(pdev);
17641 struct tg3 *tp = netdev_priv(netdev);
17642 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17643 int err;
17644
17645 rtnl_lock();
17646
17647 if (pci_enable_device(pdev)) {
17648 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
17649 goto done;
17650 }
17651
17652 pci_set_master(pdev);
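/* Restore the pre-error config space, then re-save it so any
 * subsequent recovery starts from this known-good state.
 */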
17653 pci_restore_state(pdev);
17654 pci_save_state(pdev);
17655
17656 if (!netif_running(netdev)) {
17657 rc = PCI_ERS_RESULT_RECOVERED;
17658 goto done;
17659 }
17660
17661 err = tg3_power_up(tp);
17662 if (err)
17663 goto done;
17664
17665 rc = PCI_ERS_RESULT_RECOVERED;
17666
17667 done:
17668 rtnl_unlock();
17669
17670 return rc;
17671 }
17672
17673 /**
17674 * tg3_io_resume - called when traffic can start flowing again.
17675 * @pdev: Pointer to PCI device
17676 *
17677 * This callback is called when the error recovery driver tells
17678 * us that it's OK to resume normal operation.
17679 */
17680 static void tg3_io_resume(struct pci_dev *pdev)
17681 {
17682 struct net_device *netdev = pci_get_drvdata(pdev);
17683 struct tg3 *tp = netdev_priv(netdev);
17684 int err;
17685
17686 rtnl_lock();
17687
17688 if (!netif_running(netdev))
17689 goto done;
17690
17691 tg3_full_lock(tp, 0);
17692 tg3_flag_set(tp, INIT_COMPLETE);
17693 err = tg3_restart_hw(tp, true);
17694 if (err) {
17695 tg3_full_unlock(tp);
17696 netdev_err(netdev, "Cannot restart hardware after reset.\n");
17697 goto done;
17698 }
17699
17700 netif_device_attach(netdev);
17701
17702 tg3_timer_start(tp);
17703
17704 tg3_netif_start(tp);
17705
17706 tg3_full_unlock(tp);
17707
17708 tg3_phy_start(tp);
17709
17710 done:
17711 rtnl_unlock();
17712 }
17713
17714 static const struct pci_error_handlers tg3_err_handler = {
17715 .error_detected = tg3_io_error_detected,
17716 .slot_reset = tg3_io_slot_reset,
17717 .resume = tg3_io_resume
17718 };
17719
17720 static struct pci_driver tg3_driver = {
17721 .name = DRV_MODULE_NAME,
17722 .id_table = tg3_pci_tbl,
17723 .probe = tg3_init_one,
17724 .remove = tg3_remove_one,
17725 .err_handler = &tg3_err_handler,
17726 .driver.pm = &tg3_pm_ops,
17727 };
17728
17729 static int __init tg3_init(void)
17730 {
17731 return pci_register_driver(&tg3_driver);
17732 }
17733
17734 static void __exit tg3_cleanup(void)
17735 {
17736 pci_unregister_driver(&tg3_driver);
17737 }
17738
17739 module_init(tg3_init);
17740 module_exit(tg3_cleanup);